system
stringclasses
7 values
input
stringlengths
10
94.5k
output
stringlengths
14
55.5k
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#ifndef CUDA_SGEMMN_H #define CUDA_SGEMMN_H // Written by Vasily Volkov. // Copyright (c) 2008, The Regents of the University of California. // All rights reserved. #include <stdio.h> #include "hip/hip_runtime.h" #define TIMER_TOLERANCE 0.1f #define BEGIN_TIMING( ) \ {\ unsigned int n_iterations; \ for( n_iterations = 1; n_iterations < 0x80000000; n_iterations *= 2 )\ {\ Q( hipDeviceSynchronize( ) );\ Q( hipEventRecord( start, 0 ) );\ for( unsigned int iteration = 0; iteration < n_iterations; iteration++ ){ #define END_TIMING( seconds ) }\ Q( hipEventRecord( end, 0 ) );\ Q( hipEventSynchronize( end ) );\ float milliseconds;\ Q( hipEventElapsedTime( &milliseconds, start, end ) );\ seconds = milliseconds/1e3f;\ if( seconds >= TIMER_TOLERANCE )\ break;\ }\ seconds /= n_iterations;\ } #define Q( condition ) {if( (condition) != 0 ) { printf( "\n FAILURE in %s, line %d\n", __FILE__, __LINE__ );exit( 1 );}} int ejecutar_sgemmNN( int items/*int argc, char **argv */); int ejecutar_matrixVector( int items/*int argc, char **argv */); int cublas_example(); #endif
.text .file "sgemmN.hip" .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001239d4_00000000-6_sgemmN.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2879: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2879: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z5saxpyfPfS_ .type _Z5saxpyfPfS_, @function _Z5saxpyfPfS_: .LFB2057: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2057: .size _Z5saxpyfPfS_, .-_Z5saxpyfPfS_ .globl _Z6saxpy2PKfS0_S0_Pf .type _Z6saxpy2PKfS0_S0_Pf, @function _Z6saxpy2PKfS0_S0_Pf: .LFB2058: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2058: .size _Z6saxpy2PKfS0_S0_Pf, .-_Z6saxpy2PKfS0_S0_Pf .globl _Z6saxpy3PKfS0_Pf .type _Z6saxpy3PKfS0_Pf, @function _Z6saxpy3PKfS0_Pf: .LFB2059: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2059: .size _Z6saxpy3PKfS0_Pf, .-_Z6saxpy3PKfS0_Pf .globl _Z7saxpy64PKfS0_Pf .type _Z7saxpy64PKfS0_Pf, @function _Z7saxpy64PKfS0_Pf: .LFB2060: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2060: .size _Z7saxpy64PKfS0_Pf, .-_Z7saxpy64PKfS0_Pf .globl _Z7saxpy32PKfS0_PfS0_ .type _Z7saxpy32PKfS0_PfS0_, @function _Z7saxpy32PKfS0_PfS0_: .LFB2061: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp 
.cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2061: .size _Z7saxpy32PKfS0_PfS0_, .-_Z7saxpy32PKfS0_PfS0_ .globl _Z10redux32sumPKfPf .type _Z10redux32sumPKfPf, @function _Z10redux32sumPKfPf: .LFB2062: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2062: .size _Z10redux32sumPKfPf, .-_Z10redux32sumPKfPf .globl _Z10redux64sumPKfPf .type _Z10redux64sumPKfPf, @function _Z10redux64sumPKfPf: .LFB2063: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2063: .size _Z10redux64sumPKfPf, .-_Z10redux64sumPKfPf .globl _Z10redux16sumPKfPf .type _Z10redux16sumPKfPf, @function _Z10redux16sumPKfPf: .LFB2064: .cfi_startproc endbr64 pushq %rax .cfi_def_cfa_offset 16 popq %rax .cfi_def_cfa_offset 8 subq $24, %rsp .cfi_def_cfa_offset 32 movl $1, 12(%rsp) movl 12(%rsp), %edi call exit@PLT .cfi_endproc .LFE2064: .size _Z10redux16sumPKfPf, .-_Z10redux16sumPKfPf .globl _Z4fillPfii .type _Z4fillPfii, @function _Z4fillPfii: .LFB2066: .cfi_startproc endbr64 testl %esi, %esi jle .L24 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $8, %rsp .cfi_def_cfa_offset 48 movl %edx, %ebp leal 1(%rdx,%rdx), %r12d movq %rdi, %rbx movslq %esi, %rsi leaq (%rdi,%rsi,4), %r13 .L21: call rand@PLT cltd idivl %r12d subl %ebp, %edx pxor %xmm0, %xmm0 cvtsi2ssl %edx, %xmm0 pxor %xmm1, %xmm1 cvtsi2ssl %ebp, %xmm1 addss .LC0(%rip), %xmm1 divss %xmm1, %xmm0 movss %xmm0, (%rbx) addq $4, %rbx cmpq %r13, %rbx jne .L21 addq $8, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 
popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L24: .cfi_restore 3 .cfi_restore 6 .cfi_restore 12 .cfi_restore 13 ret .cfi_endproc .LFE2066: .size _Z4fillPfii, .-_Z4fillPfii .globl _Z4diffiiPfiS_i .type _Z4diffiiPfiS_i, @function _Z4diffiiPfiS_i: .LFB2067: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $56, %rsp .cfi_def_cfa_offset 112 movl %edi, 8(%rsp) movl %esi, 12(%rsp) movq %rdx, 24(%rsp) movl %ecx, 16(%rsp) movq %r8, 32(%rsp) movl %r9d, 20(%rsp) testl %esi, %esi jle .L33 movl $0, %r15d movl $0, %r14d movl $0, %r13d pxor %xmm1, %xmm1 movslq %edi, %rax movq %rax, 40(%rsp) jmp .L29 .L31: movslq %r14d, %rax movq 24(%rsp), %rcx leaq (%rcx,%rax,4), %rbx movslq %r15d, %rdx movq 32(%rsp), %rsi leaq (%rsi,%rdx,4), %rbp movq 40(%rsp), %rdi addq %rdi, %rax leaq (%rcx,%rax,4), %r12 .L30: movss (%rbx), %xmm2 subss 0(%rbp), %xmm2 andps .LC2(%rip), %xmm2 movaps %xmm2, %xmm0 call fmaxf@PLT movaps %xmm0, %xmm1 addq $4, %rbx addq $4, %rbp cmpq %r12, %rbx jne .L30 .L32: addl $1, %r13d movl 16(%rsp), %eax addl %eax, %r14d movl 20(%rsp), %eax addl %eax, %r15d cmpl %r13d, 12(%rsp) je .L27 .L29: cmpl $0, 8(%rsp) jg .L31 jmp .L32 .L33: pxor %xmm1, %xmm1 .L27: movaps %xmm1, %xmm0 addq $56, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2067: .size _Z4diffiiPfiS_i, .-_Z4diffiiPfiS_i .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC3: .string "device memory allocation failed" .section .rodata.str1.1,"aMS",@progbits,1 .LC4: .string "CUBLAS initialization failed\n" 
.LC5: .string "data download failed" .section .rodata.str1.8 .align 8 .LC6: .string "/home/ubuntu/Datasets/stackv2/train-structured/immagery/air/master/AirLib/cuda/sgemmN.cu" .section .rodata.str1.1 .LC7: .string "\n FAILURE in %s, line %d\n" .LC8: .string "cublasSgemv failed" .LC9: .string "data upload failed" .section .rodata.str1.8 .align 8 .LC10: .string "Multiplicacion mxv de %d elems.\n" .align 8 .LC11: .string "Sumatorio completo en CUDA: %f en %f + %f ms.\n" .align 8 .LC14: .string "Sumatorio completo en CPU: %f en %f y %f ms.\n" .text .globl _Z14cublas_examplev .type _Z14cublas_examplev, @function _Z14cublas_examplev: .LFB2874: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 subq $96, %rsp .cfi_def_cfa_offset 144 movq %fs:40, %rax movq %rax, 88(%rsp) xorl %eax, %eax movl $536903680, %edi call malloc@PLT movq %rax, %rbp movl $1073741824, %edi call malloc@PLT movq %rax, %rbx movl $65536, %edi call malloc@PLT movq %rax, %r12 movl $31, %edx movl $268435456, %esi movq %rbx, %rdi call _Z4fillPfii movl $31, %edx movl $16384, %esi movq %r12, %rdi call _Z4fillPfii movl $0, %r8d movl $0, %edi movl $0, %esi jmp .L37 .L38: addl $1, %eax addl $16384, %edx cmpl $16384, %eax je .L62 .L39: cmpl %eax, %edi jl .L38 movslq %edx, %rcx movss (%rbx,%rcx,4), %xmm0 leal (%rax,%r8), %ecx movslq %ecx, %rcx movss %xmm0, (%rbx,%rcx,4) movslq %esi, %rcx movss %xmm0, 0(%rbp,%rcx,4) addl $1, %esi jmp .L38 .L62: addl $1, %edi addl $16384, %r8d cmpl $16384, %edi je .L40 .L37: movl %edi, %edx movl $0, %eax jmp .L39 .L40: movl $65536, %edi call malloc@PLT movq %rax, %r13 leaq 65536(%rax), %rdx .L41: movl $0x00000000, (%rax) addq $4, %rax cmpq %rdx, %rax jne .L41 leaq 48(%rsp), %rdi movl $1073741824, %esi call cudaMalloc@PLT leaq 56(%rsp), %rdi movl 
$65536, %esi call cudaMalloc@PLT leaq 64(%rsp), %rdi movl $65536, %esi call cudaMalloc@PLT leaq 20(%rsp), %rdi movl $536903680, %esi call cudaMalloc@PLT testl %eax, %eax jne .L63 leaq 40(%rsp), %rdi call cublasCreate_v2@PLT testl %eax, %eax jne .L64 movl $1, %esi movq 40(%rsp), %rdi call cublasSetAtomicsMode@PLT subq $8, %rsp .cfi_def_cfa_offset 152 pushq $16384 .cfi_def_cfa_offset 160 movq 64(%rsp), %r9 movl $16384, %r8d movq %rbx, %rcx movl $4, %edx movl $16384, %esi movl $16384, %edi call cublasSetMatrix@PLT addq $16, %rsp .cfi_def_cfa_offset 144 movl $1, %r9d movq 56(%rsp), %r8 movl $1, %ecx movq %r12, %rdx movl $4, %esi movl $16384, %edi call cublasSetVector@PLT movl $1, %r9d movq 64(%rsp), %r8 movl $1, %ecx movq %r13, %rdx movl $4, %esi movl $16384, %edi call cublasSetVector@PLT testl %eax, %eax jne .L65 movl $0x3f800000, 24(%rsp) movl $0x00000000, 28(%rsp) leaq 72(%rsp), %rdi call cudaEventCreate@PLT testl %eax, %eax jne .L66 leaq 80(%rsp), %rdi call cudaEventCreate@PLT testl %eax, %eax jne .L67 movl $0, %esi movq 72(%rsp), %rdi call cudaEventRecord@PLT movl $4, %edi call malloc@PLT movq %rax, %rbp pushq $1 .cfi_def_cfa_offset 152 pushq 72(%rsp) .cfi_def_cfa_offset 160 leaq 44(%rsp), %rax pushq %rax .cfi_def_cfa_offset 168 pushq $1 .cfi_def_cfa_offset 176 pushq 88(%rsp) .cfi_def_cfa_offset 184 pushq $16384 .cfi_def_cfa_offset 192 movq 96(%rsp), %r9 leaq 72(%rsp), %r8 movl $16384, %ecx movl $16384, %edx movl $1, %esi movq 88(%rsp), %rdi call cublasSgemv_v2@PLT addq $48, %rsp .cfi_def_cfa_offset 144 testl %eax, %eax jne .L68 movl $0, %esi movq 80(%rsp), %rdi call cudaEventRecord@PLT movq 80(%rsp), %rdi call cudaEventSynchronize@PLT leaq 32(%rsp), %rdi movq 80(%rsp), %rdx movq 72(%rsp), %rsi call cudaEventElapsedTime@PLT movl $0, %esi movq 72(%rsp), %rdi call cudaEventRecord@PLT subq $8, %rsp .cfi_def_cfa_offset 152 pushq %rbp .cfi_def_cfa_offset 160 movl $1, %r9d movq 80(%rsp), %r8 movl $1, %ecx movq 72(%rsp), %rdx movl $16384, %esi movq 56(%rsp), %rdi call 
cublasSdot_v2@PLT addq $16, %rsp .cfi_def_cfa_offset 144 movl $0, %esi movq 80(%rsp), %rdi call cudaEventRecord@PLT movq 80(%rsp), %rdi call cudaEventSynchronize@PLT leaq 36(%rsp), %rdi movq 80(%rsp), %rdx movq 72(%rsp), %rsi call cudaEventElapsedTime@PLT movl $1, %r9d movq %r13, %r8 movl $1, %ecx movq 64(%rsp), %rdx movl $4, %esi movl $16384, %edi call cublasGetVector@PLT testl %eax, %eax jne .L69 movq 56(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rdi call cublasDestroy_v2@PLT call clock@PLT movq %rax, %r14 movl $0, %edx movl $0x00000000, 12(%rsp) .L50: movl $0, %eax pxor %xmm1, %xmm1 .L51: movss (%rbx,%rax), %xmm0 mulss (%r12,%rax), %xmm0 addss %xmm0, %xmm1 addq $4, %rax cmpq $65536, %rax jne .L51 mulss (%r12,%rdx,4), %xmm1 addss 12(%rsp), %xmm1 movss %xmm1, 12(%rsp) addq $1, %rdx addq $65536, %rbx cmpq $16384, %rdx jne .L50 call clock@PLT movq %rax, %rbx call clock@PLT movq %rax, %r13 movl $1000, %edx .L53: movl $16384, %eax .L54: subl $1, %eax jne .L54 subl $1, %edx jne .L53 call clock@PLT movq %rax, %r12 movl $16384, %edx leaq .LC10(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT pxor %xmm0, %xmm0 cvtss2sd 0(%rbp), %xmm0 pxor %xmm2, %xmm2 cvtss2sd 36(%rsp), %xmm2 pxor %xmm1, %xmm1 cvtss2sd 32(%rsp), %xmm1 leaq .LC11(%rip), %rsi movl $2, %edi movl $3, %eax call __printf_chk@PLT subq %r13, %r12 pxor %xmm2, %xmm2 cvtsi2sdq %r12, %xmm2 movsd .LC12(%rip), %xmm0 divsd %xmm0, %xmm2 movsd .LC13(%rip), %xmm3 subq %r14, %rbx pxor %xmm1, %xmm1 cvtsi2sdq %rbx, %xmm1 divsd %xmm0, %xmm1 pxor %xmm0, %xmm0 cvtss2sd 12(%rsp), %xmm0 mulsd %xmm3, %xmm2 mulsd %xmm3, %xmm1 leaq .LC14(%rip), %rsi movl $2, %edi movl $3, %eax call __printf_chk@PLT movl $0, %eax jmp .L36 .L63: leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %eax .L36: movq 88(%rsp), %rdx subq %fs:40, %rdx jne .L70 addq $96, %rsp .cfi_remember_state .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 
popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L64: .cfi_restore_state leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %eax jmp .L36 .L65: leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 56(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rdi call cublasDestroy_v2@PLT movl $1, %eax jmp .L36 .L66: movl $528, %ecx leaq .LC6(%rip), %rdx leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L67: movl $529, %ecx leaq .LC6(%rip), %rdx leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L68: leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 56(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rdi call cublasDestroy_v2@PLT movl $1, %eax jmp .L36 .L69: leaq .LC9(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 56(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rdi call cublasDestroy_v2@PLT movl $1, %eax jmp .L36 .L70: call __stack_chk_fail@PLT .cfi_endproc .LFE2874: .size _Z14cublas_examplev, .-_Z14cublas_examplev .globl _Z16ejecutar_sgemmNNi .type _Z16ejecutar_sgemmNNi, @function _Z16ejecutar_sgemmNNi: .LFB2875: .cfi_startproc endbr64 movl $0, %eax ret .cfi_endproc .LFE2875: .size _Z16ejecutar_sgemmNNi, .-_Z16ejecutar_sgemmNNi .globl _Z38__device_stub__Z7sgemmNTPKfiS0_iPfiiffPKfiS0_iPfiiff .type _Z38__device_stub__Z7sgemmNTPKfiS0_iPfiiffPKfiS0_iPfiiff, @function _Z38__device_stub__Z7sgemmNTPKfiS0_iPfiiffPKfiS0_iPfiiff: .LFB2901: .cfi_startproc endbr64 subq $200, %rsp .cfi_def_cfa_offset 208 movq %rdi, 40(%rsp) movl %esi, 36(%rsp) movq %rdx, 24(%rsp) movl %ecx, 32(%rsp) movq %r8, 16(%rsp) movl %r9d, 12(%rsp) movss %xmm0, 8(%rsp) movss %xmm1, 4(%rsp) movq %fs:40, %rax movq %rax, 184(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 36(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 32(%rsp), %rax movq %rax, 
136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) leaq 208(%rsp), %rax movq %rax, 160(%rsp) leaq 8(%rsp), %rax movq %rax, 168(%rsp) leaq 4(%rsp), %rax movq %rax, 176(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L76 .L72: movq 184(%rsp), %rax subq %fs:40, %rax jne .L77 addq $200, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L76: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 216 pushq 56(%rsp) .cfi_def_cfa_offset 224 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq sgemmNT(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 208 jmp .L72 .L77: call __stack_chk_fail@PLT .cfi_endproc .LFE2901: .size _Z38__device_stub__Z7sgemmNTPKfiS0_iPfiiffPKfiS0_iPfiiff, .-_Z38__device_stub__Z7sgemmNTPKfiS0_iPfiiffPKfiS0_iPfiiff .globl sgemmNT .type sgemmNT, @function sgemmNT: .LFB2902: .cfi_startproc endbr64 subq $16, %rsp .cfi_def_cfa_offset 24 movl 24(%rsp), %eax pushq %rax .cfi_def_cfa_offset 32 call _Z38__device_stub__Z7sgemmNTPKfiS0_iPfiiffPKfiS0_iPfiiff addq $24, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2902: .size sgemmNT, .-sgemmNT .globl _Z38__device_stub__Z7sgemmNNPKfiS0_iPfiiffPKfiS0_iPfiiff .type _Z38__device_stub__Z7sgemmNNPKfiS0_iPfiiffPKfiS0_iPfiiff, @function _Z38__device_stub__Z7sgemmNNPKfiS0_iPfiiffPKfiS0_iPfiiff: .LFB2903: .cfi_startproc endbr64 subq $200, %rsp .cfi_def_cfa_offset 208 movq %rdi, 40(%rsp) movl %esi, 36(%rsp) movq %rdx, 24(%rsp) movl %ecx, 32(%rsp) movq %r8, 16(%rsp) movl %r9d, 12(%rsp) movss %xmm0, 8(%rsp) movss %xmm1, 4(%rsp) movq %fs:40, %rax movq %rax, 184(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 36(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 
32(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) leaq 208(%rsp), %rax movq %rax, 160(%rsp) leaq 8(%rsp), %rax movq %rax, 168(%rsp) leaq 4(%rsp), %rax movq %rax, 176(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L84 .L80: movq 184(%rsp), %rax subq %fs:40, %rax jne .L85 addq $200, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L84: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 216 pushq 56(%rsp) .cfi_def_cfa_offset 224 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq sgemmNN(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 208 jmp .L80 .L85: call __stack_chk_fail@PLT .cfi_endproc .LFE2903: .size _Z38__device_stub__Z7sgemmNNPKfiS0_iPfiiffPKfiS0_iPfiiff, .-_Z38__device_stub__Z7sgemmNNPKfiS0_iPfiiffPKfiS0_iPfiiff .globl sgemmNN .type sgemmNN, @function sgemmNN: .LFB2904: .cfi_startproc endbr64 subq $16, %rsp .cfi_def_cfa_offset 24 movl 24(%rsp), %eax pushq %rax .cfi_def_cfa_offset 32 call _Z38__device_stub__Z7sgemmNNPKfiS0_iPfiiffPKfiS0_iPfiiff addq $24, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2904: .size sgemmNN, .-sgemmNN .globl ourSgemm .type ourSgemm, @function ourSgemm: .LFB2065: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $56, %rsp .cfi_def_cfa_offset 80 movl %r8d, %ebp movss %xmm0, 8(%rsp) movq %r9, %rbx movss %xmm1, 12(%rsp) leal 63(%rdx), %eax testl %edx, %edx cmovns %edx, %eax sarl $6, %eax movl %eax, 24(%rsp) leal 15(%rcx), %eax testl %ecx, %ecx cmovns %ecx, %eax sarl $4, %eax movl %eax, 28(%rsp) movl $1, 32(%rsp) movl $16, 36(%rsp) movl $4, 40(%rsp) movl $1, 44(%rsp) andl $-33, %esi cmpb $78, %sil jne 
.L89 movl 44(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 36(%rsp), %rdx movq 24(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L92 .L88: addq $56, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L92: .cfi_restore_state subq $8, %rsp .cfi_def_cfa_offset 88 pushq %rbp .cfi_def_cfa_offset 96 movss 28(%rsp), %xmm1 movss 24(%rsp), %xmm0 movl 128(%rsp), %r9d movq 120(%rsp), %r8 movl 112(%rsp), %ecx movq 104(%rsp), %rdx movl 96(%rsp), %esi movq %rbx, %rdi call _Z38__device_stub__Z7sgemmNNPKfiS0_iPfiiffPKfiS0_iPfiiff addq $16, %rsp .cfi_def_cfa_offset 80 jmp .L88 .L89: movl 44(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 36(%rsp), %rdx movq 24(%rsp), %rdi movl 32(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax jne .L88 subq $8, %rsp .cfi_def_cfa_offset 88 pushq %rbp .cfi_def_cfa_offset 96 movss 28(%rsp), %xmm1 movss 24(%rsp), %xmm0 movl 128(%rsp), %r9d movq 120(%rsp), %r8 movl 112(%rsp), %ecx movq 104(%rsp), %rdx movl 96(%rsp), %esi movq %rbx, %rdi call _Z38__device_stub__Z7sgemmNTPKfiS0_iPfiiffPKfiS0_iPfiiff addq $16, %rsp .cfi_def_cfa_offset 80 jmp .L88 .cfi_endproc .LFE2065: .size ourSgemm, .-ourSgemm .globl _Z47__device_stub__Z16vec_mat_vec_multPKfiS0_iPfiffPKfiS0_iPfiff .type _Z47__device_stub__Z16vec_mat_vec_multPKfiS0_iPfiffPKfiS0_iPfiff, @function _Z47__device_stub__Z16vec_mat_vec_multPKfiS0_iPfiffPKfiS0_iPfiff: .LFB2905: .cfi_startproc endbr64 subq $200, %rsp .cfi_def_cfa_offset 208 movq %rdi, 40(%rsp) movl %esi, 36(%rsp) movq %rdx, 24(%rsp) movl %ecx, 32(%rsp) movq %r8, 16(%rsp) movl %r9d, 12(%rsp) movss %xmm0, 8(%rsp) movss %xmm1, 4(%rsp) movq %fs:40, %rax movq %rax, 184(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 36(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 32(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) leaq 
8(%rsp), %rax movq %rax, 160(%rsp) leaq 4(%rsp), %rax movq %rax, 168(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L97 .L93: movq 184(%rsp), %rax subq %fs:40, %rax jne .L98 addq $200, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L97: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 216 pushq 56(%rsp) .cfi_def_cfa_offset 224 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq vec_mat_vec_mult(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 208 jmp .L93 .L98: call __stack_chk_fail@PLT .cfi_endproc .LFE2905: .size _Z47__device_stub__Z16vec_mat_vec_multPKfiS0_iPfiffPKfiS0_iPfiff, .-_Z47__device_stub__Z16vec_mat_vec_multPKfiS0_iPfiffPKfiS0_iPfiff .globl vec_mat_vec_mult .type vec_mat_vec_mult, @function vec_mat_vec_mult: .LFB2906: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z47__device_stub__Z16vec_mat_vec_multPKfiS0_iPfiffPKfiS0_iPfiff addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2906: .size vec_mat_vec_mult, .-vec_mat_vec_mult .globl _Z37__device_stub__Z6vmSymvPKfiS0_iPfiiffPKfiS0_iPfiiff .type _Z37__device_stub__Z6vmSymvPKfiS0_iPfiiffPKfiS0_iPfiiff, @function _Z37__device_stub__Z6vmSymvPKfiS0_iPfiiffPKfiS0_iPfiiff: .LFB2907: .cfi_startproc endbr64 subq $200, %rsp .cfi_def_cfa_offset 208 movq %rdi, 40(%rsp) movl %esi, 36(%rsp) movq %rdx, 24(%rsp) movl %ecx, 32(%rsp) movq %r8, 16(%rsp) movl %r9d, 12(%rsp) movss %xmm0, 8(%rsp) movss %xmm1, 4(%rsp) movq %fs:40, %rax movq %rax, 184(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 36(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 32(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) 
leaq 208(%rsp), %rax movq %rax, 160(%rsp) leaq 8(%rsp), %rax movq %rax, 168(%rsp) leaq 4(%rsp), %rax movq %rax, 176(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L105 .L101: movq 184(%rsp), %rax subq %fs:40, %rax jne .L106 addq $200, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L105: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 216 pushq 56(%rsp) .cfi_def_cfa_offset 224 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq vmSymv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 208 jmp .L101 .L106: call __stack_chk_fail@PLT .cfi_endproc .LFE2907: .size _Z37__device_stub__Z6vmSymvPKfiS0_iPfiiffPKfiS0_iPfiiff, .-_Z37__device_stub__Z6vmSymvPKfiS0_iPfiiffPKfiS0_iPfiiff .globl vmSymv .type vmSymv, @function vmSymv: .LFB2908: .cfi_startproc endbr64 subq $16, %rsp .cfi_def_cfa_offset 24 movl 24(%rsp), %eax pushq %rax .cfi_def_cfa_offset 32 call _Z37__device_stub__Z6vmSymvPKfiS0_iPfiiffPKfiS0_iPfiiff addq $24, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2908: .size vmSymv, .-vmSymv .globl _Z33__device_stub__Z3vmvPKfiS0_iPfiffPKfiS0_iPfiff .type _Z33__device_stub__Z3vmvPKfiS0_iPfiffPKfiS0_iPfiff, @function _Z33__device_stub__Z3vmvPKfiS0_iPfiffPKfiS0_iPfiff: .LFB2909: .cfi_startproc endbr64 subq $200, %rsp .cfi_def_cfa_offset 208 movq %rdi, 40(%rsp) movl %esi, 36(%rsp) movq %rdx, 24(%rsp) movl %ecx, 32(%rsp) movq %r8, 16(%rsp) movl %r9d, 12(%rsp) movss %xmm0, 8(%rsp) movss %xmm1, 4(%rsp) movq %fs:40, %rax movq %rax, 184(%rsp) xorl %eax, %eax leaq 40(%rsp), %rax movq %rax, 112(%rsp) leaq 36(%rsp), %rax movq %rax, 120(%rsp) leaq 24(%rsp), %rax movq %rax, 128(%rsp) leaq 32(%rsp), %rax movq %rax, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 
152(%rsp) leaq 8(%rsp), %rax movq %rax, 160(%rsp) leaq 4(%rsp), %rax movq %rax, 168(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $1, 72(%rsp) movl $1, 76(%rsp) movl $1, 80(%rsp) movl $1, 84(%rsp) leaq 56(%rsp), %rcx leaq 48(%rsp), %rdx leaq 76(%rsp), %rsi leaq 64(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L113 .L109: movq 184(%rsp), %rax subq %fs:40, %rax jne .L114 addq $200, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L113: .cfi_restore_state pushq 56(%rsp) .cfi_def_cfa_offset 216 pushq 56(%rsp) .cfi_def_cfa_offset 224 leaq 128(%rsp), %r9 movq 92(%rsp), %rcx movl 100(%rsp), %r8d movq 80(%rsp), %rsi movl 88(%rsp), %edx leaq vmv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 208 jmp .L109 .L114: call __stack_chk_fail@PLT .cfi_endproc .LFE2909: .size _Z33__device_stub__Z3vmvPKfiS0_iPfiffPKfiS0_iPfiff, .-_Z33__device_stub__Z3vmvPKfiS0_iPfiffPKfiS0_iPfiff .globl vmv .type vmv, @function vmv: .LFB2910: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z33__device_stub__Z3vmvPKfiS0_iPfiffPKfiS0_iPfiff addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2910: .size vmv, .-vmv .section .rodata.str1.8 .align 8 .LC17: .string "\nDevice: %s, %.0f MHz clock, %.0f MB memory.\n" .section .rodata.str1.1 .LC18: .string "%d Elementos.\n" .section .rodata.str1.8 .align 8 .LC19: .string "Resultado Final en cuda: %f en %f ms\n" .align 8 .LC20: .string "Resultado Final en cpu: %f en %f ms\n" .text .globl _Z21ejecutar_matrixVectori .type _Z21ejecutar_matrixVectori, @function _Z21ejecutar_matrixVectori: .LFB2876: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $1160, %rsp .cfi_def_cfa_offset 1216 movq %fs:40, 
%rax movq %rax, 1144(%rsp) xorl %eax, %eax movl $16, %eax cmpl %eax, %edi cmovge %edi, %eax movl %eax, %r13d movl $0, %edi call cudaSetDevice@PLT testl %eax, %eax jne .L142 leaq 112(%rsp), %rdi movl $0, %esi call cudaGetDeviceProperties_v2@PLT testl %eax, %eax jne .L143 movq 400(%rsp), %rax testq %rax, %rax js .L120 pxor %xmm1, %xmm1 cvtsi2ssq %rax, %xmm1 .L121: movss .LC15(%rip), %xmm0 mulss %xmm0, %xmm1 mulss %xmm0, %xmm1 pxor %xmm0, %xmm0 cvtsi2ssl 460(%rsp), %xmm0 divss .LC16(%rip), %xmm0 cvtss2sd %xmm0, %xmm0 leaq 112(%rsp), %rdx cvtss2sd %xmm1, %xmm1 leaq .LC17(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT leaq 40(%rsp), %rdi call cudaEventCreate@PLT testl %eax, %eax jne .L144 leaq 48(%rsp), %rdi call cudaEventCreate@PLT movl %eax, %r12d testl %eax, %eax jne .L145 movl %r13d, %edx leaq .LC18(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leal 1(%r13), %eax imull %r13d, %eax movl %eax, %edi shrl $31, %edi addl %eax, %edi sarl %edi movslq %edi, %rdi salq $2, %rdi call malloc@PLT movq %rax, 24(%rsp) movl %r13d, %r14d imull %r13d, %r14d movslq %r14d, %rax leaq 0(,%rax,4), %rcx movq %rcx, %rdi movq %rcx, (%rsp) call malloc@PLT movq %rax, 16(%rsp) movslq %r13d, %r15 leaq 0(,%r15,4), %rbx movq %rbx, %rdi call malloc@PLT movq %rax, %rbp movq %rbx, %rdi call malloc@PLT movq %rax, 8(%rsp) movl $31, %edx movl %r14d, %esi movq 16(%rsp), %r14 movq %r14, %rdi call _Z4fillPfii movl $31, %edx movl %r13d, %esi movq %rbp, %rdi call _Z4fillPfii movl %r13d, %r8d movl $0, %edi movl $0, %r9d movq 24(%rsp), %r10 movq %r14, %r11 jmp .L124 .L142: movl $856, %ecx leaq .LC6(%rip), %rdx leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L143: movl $859, %ecx leaq .LC6(%rip), %rdx leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L120: movq %rax, %rdx shrq %rdx andl $1, %eax orq %rax, %rdx pxor %xmm1, %xmm1 cvtsi2ssq %rdx, %xmm1 addss %xmm1, %xmm1 jmp .L121 
.L144: movl $863, %ecx leaq .LC6(%rip), %rdx leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L145: movl $864, %ecx leaq .LC6(%rip), %rdx leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L127: movl %r8d, %edx salq $2, %rdx movslq %r12d, %rax addq %rdi, %rax leaq (%r11,%rax,4), %rsi movslq %r9d, %rax leaq (%r10,%rax,4), %rcx movl $0, %eax .L125: movss (%rsi,%rax), %xmm0 movss %xmm0, (%rcx,%rax) addq $4, %rax cmpq %rdx, %rax jne .L125 addl %r8d, %r9d .L128: addq $1, %rdi subl $1, %r8d addl %r13d, %r12d cmpq %rdi, %r15 je .L126 .L124: cmpl %edi, %r13d jg .L127 jmp .L128 .L126: movq 8(%rsp), %rax movq %rax, %r12 leaq (%rbx,%rax), %r14 .L129: movl $0x00000000, (%rax) addq $4, %rax cmpq %r14, %rax jne .L129 leaq 56(%rsp), %rdi movq (%rsp), %r15 movq %r15, %rsi call cudaMalloc@PLT leaq 64(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 72(%rsp), %rdi movq %rbx, %rsi call cudaMalloc@PLT leaq 80(%rsp), %rdi movl $4, %esi call cudaMalloc@PLT movl $1, %ecx movq %r15, %rdx movq 16(%rsp), %rsi movq 56(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx movq %rbp, %rsi movq 64(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movq %rbx, %rdx movq 8(%rsp), %rsi movq 72(%rsp), %rdi call cudaMemcpy@PLT call clock@PLT movq %rax, %r15 leal 31(%r13), %eax testl %r13d, %r13d cmovns %r13d, %eax sarl $5, %eax movl %eax, 88(%rsp) movl $1, 92(%rsp) movl $32, 100(%rsp) movl $1, 104(%rsp) movl $0, %r9d movl $0, %r8d movq 100(%rsp), %rdx movl $1, %ecx movq 88(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L146 .L130: call cudaDeviceSynchronize@PLT movl $2, %ecx movq %rbx, %rdx movq 72(%rsp), %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT testl %eax, %eax jne .L147 movl $0x00000000, (%rsp) .L131: movss (%rsp), %xmm2 addss (%r12), %xmm2 movss %xmm2, (%rsp) addq $4, %r12 cmpq %r14, %r12 jne .L131 call clock@PLT subq %r15, %rax pxor %xmm1, %xmm1 
cvtsi2sdq %rax, %xmm1 divsd .LC12(%rip), %xmm1 pxor %xmm0, %xmm0 cvtss2sd (%rsp), %xmm0 mulsd .LC13(%rip), %xmm1 leaq .LC19(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT call clock@PLT movq %rax, %r12 movq %rbp, %rcx movq 16(%rsp), %rdx leaq (%rbx,%rbp), %rsi movl $0x00000000, (%rsp) .L132: movl $0, %eax pxor %xmm1, %xmm1 .L133: movss 0(%rbp,%rax), %xmm0 mulss (%rdx,%rax), %xmm0 addss %xmm0, %xmm1 addq $4, %rax cmpq %rax, %rbx jne .L133 mulss (%rcx), %xmm1 addss (%rsp), %xmm1 movss %xmm1, (%rsp) addq $4, %rcx addq %rbx, %rdx cmpq %rsi, %rcx jne .L132 call clock@PLT subq %r12, %rax pxor %xmm1, %xmm1 cvtsi2sdq %rax, %xmm1 divsd .LC12(%rip), %xmm1 pxor %xmm0, %xmm0 cvtss2sd (%rsp), %xmm0 mulsd .LC13(%rip), %xmm1 leaq .LC20(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT movq 64(%rsp), %rdi call cudaFree@PLT movq 56(%rsp), %rdi call cudaFree@PLT movq 72(%rsp), %rdi call cudaFree@PLT movq %rbp, %rdi call free@PLT movq 24(%rsp), %rdi call free@PLT movq 8(%rsp), %rdi call free@PLT movq 1144(%rsp), %rax subq %fs:40, %rax jne .L148 movl $0, %eax addq $1160, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L146: .cfi_restore_state pxor %xmm1, %xmm1 movss .LC0(%rip), %xmm0 movl %r13d, %r9d movq 72(%rsp), %r8 movl %r13d, %ecx movq 56(%rsp), %rdx movl %r13d, %esi movq 64(%rsp), %rdi call _Z33__device_stub__Z3vmvPKfiS0_iPfiffPKfiS0_iPfiff jmp .L130 .L147: movl $953, %ecx leaq .LC6(%rip), %rdx leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L148: call __stack_chk_fail@PLT .cfi_endproc .LFE2876: .size _Z21ejecutar_matrixVectori, .-_Z21ejecutar_matrixVectori .section .rodata.str1.1 .LC21: .string "vmv" .LC22: .string "vmSymv" .LC23: .string "vec_mat_vec_mult" .LC24: .string "sgemmNN" .LC25: .string 
"sgemmNT" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2912: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC21(%rip), %rdx movq %rdx, %rcx leaq vmv(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC22(%rip), %rdx movq %rdx, %rcx leaq vmSymv(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC23(%rip), %rdx movq %rdx, %rcx leaq vec_mat_vec_mult(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC24(%rip), %rdx movq %rdx, %rcx leaq sgemmNN(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC25(%rip), %rdx movq %rdx, %rcx leaq sgemmNT(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT 
popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2912: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 1065353216 .section .rodata.cst16,"aM",@progbits,16 .align 16 .LC2: .long 2147483647 .long 0 .long 0 .long 0 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC12: .long 0 .long 1093567616 .align 8 .LC13: .long 0 .long 1083129856 .section .rodata.cst4 .align 4 .LC15: .long 981467136 .align 4 .LC16: .long 1148846080 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "sgemmN.hip" .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h> #include <assert.h> #include <cuda.h> // zwykła funkcja w C/C++ void incrementArrayOnHost(double *tab, int N) { for (int i=0; i < N; i++) tab[i] += 1.0; } // funkcja (tzw. kernel) działająca na GPU __global__ void incrementArrayOnDevice(double *tab, int N) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; // if (idx < N && blockIdx.y > 0) // printf (" %d %d %d %d %d %d \n", idx, gridDim.x, blockDim.x, N, blockIdx.x, threadIdx.x); if (idx<N) tab[idx] += 1.0; } int main(void) { const int N = 100000000; printf("N = %d\n", N); double *a_h, *b_h; // wskaźniki na pamięć na CPU (host) double *a_d; // wskaźnik na bufor w GPU (device) // przydział pamięci na CPU a_h = new double [N]; b_h = new double [N]; // przydział pamięci na GPU cudaMalloc((void **) &a_d, sizeof(double)*N); // inicjalizacja danych na CPU for (int i=0; i<N; i++) { a_h[i] = i + 1.0; b_h[i] = 0; } // przesłąnie danych na GPU: a_h -> a_d cudaMemcpy(a_d, a_h, sizeof(double)*N, cudaMemcpyDefault ); // robimy jakieś obliczenia na CPU incrementArrayOnHost(a_h, N); // a teraz próbujemy zrobić to samo na GPU dim3 blockSize = 512; dim3 gridSize (1,1,1); const int max_block_size = 65535; int nBlocks = N/blockSize.x + (N%blockSize.x == 0 ? 0 : 1); gridSize.y = 1 + nBlocks/max_block_size; gridSize.x = (nBlocks > max_block_size) ? max_block_size : nBlocks; printf("%d %d\n", gridSize.x, gridSize.y); // wywołujemy kernel na GPU incrementArrayOnDevice <<< gridSize, blockSize >>> (a_d, N); // kopiujemy wynik z GPU do CPU cudaDeviceSynchronize(); cudaMemcpy(b_h, a_d, sizeof(double)*N, cudaMemcpyDefault); // sprawdzamy wynik for (int i=0; i<N; i++) if (a_h[i] != b_h[i] && i < 100) { printf ("i=%d %g %g\n", i, a_h[i], b_h[i]); assert(a_h[i] == b_h[i]); } // sprzątamy delete [] a_h; delete [] b_h; cudaFree(a_d); printf("Jeżeli widzisz ten napis, to program działa poprawnie\n"); }
code for sm_80 Function : _Z22incrementArrayOnDevicePdi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */ /* 0x000e280000002600 */ /*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e280000002500 */ /*0030*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e620000002100 */ /*0040*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */ /* 0x001fc800078e0203 */ /*0050*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0205 */ /*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x168], PT ; /* 0x00005a0000007a0c */ /* 0x000fda0003f06270 */ /*0070*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0080*/ HFMA2.MMA R3, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff037435 */ /* 0x000fe200000001ff */ /*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*00a0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fca00078e0203 */ /*00b0*/ LDG.E.64 R4, [R2.64] ; /* 0x0000000402047981 */ /* 0x000ea4000c1e1b00 */ /*00c0*/ DADD R4, R4, 1 ; /* 0x3ff0000004047429 */ /* 0x004e0e0000000000 */ /*00d0*/ STG.E.64 [R2.64], R4 ; /* 0x0000000402007986 */ /* 0x001fe2000c101b04 */ /*00e0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <assert.h> #include <cuda.h> // zwykła funkcja w C/C++ void incrementArrayOnHost(double *tab, int N) { for (int i=0; i < N; i++) tab[i] += 1.0; } // funkcja (tzw. kernel) działająca na GPU __global__ void incrementArrayOnDevice(double *tab, int N) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; // if (idx < N && blockIdx.y > 0) // printf (" %d %d %d %d %d %d \n", idx, gridDim.x, blockDim.x, N, blockIdx.x, threadIdx.x); if (idx<N) tab[idx] += 1.0; } int main(void) { const int N = 100000000; printf("N = %d\n", N); double *a_h, *b_h; // wskaźniki na pamięć na CPU (host) double *a_d; // wskaźnik na bufor w GPU (device) // przydział pamięci na CPU a_h = new double [N]; b_h = new double [N]; // przydział pamięci na GPU cudaMalloc((void **) &a_d, sizeof(double)*N); // inicjalizacja danych na CPU for (int i=0; i<N; i++) { a_h[i] = i + 1.0; b_h[i] = 0; } // przesłąnie danych na GPU: a_h -> a_d cudaMemcpy(a_d, a_h, sizeof(double)*N, cudaMemcpyDefault ); // robimy jakieś obliczenia na CPU incrementArrayOnHost(a_h, N); // a teraz próbujemy zrobić to samo na GPU dim3 blockSize = 512; dim3 gridSize (1,1,1); const int max_block_size = 65535; int nBlocks = N/blockSize.x + (N%blockSize.x == 0 ? 0 : 1); gridSize.y = 1 + nBlocks/max_block_size; gridSize.x = (nBlocks > max_block_size) ? max_block_size : nBlocks; printf("%d %d\n", gridSize.x, gridSize.y); // wywołujemy kernel na GPU incrementArrayOnDevice <<< gridSize, blockSize >>> (a_d, N); // kopiujemy wynik z GPU do CPU cudaDeviceSynchronize(); cudaMemcpy(b_h, a_d, sizeof(double)*N, cudaMemcpyDefault); // sprawdzamy wynik for (int i=0; i<N; i++) if (a_h[i] != b_h[i] && i < 100) { printf ("i=%d %g %g\n", i, a_h[i], b_h[i]); assert(a_h[i] == b_h[i]); } // sprzątamy delete [] a_h; delete [] b_h; cudaFree(a_d); printf("Jeżeli widzisz ten napis, to program działa poprawnie\n"); }
.file "tmpxft_00024dc6_00000000-6_4.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z20incrementArrayOnHostPdi .type _Z20incrementArrayOnHostPdi, @function _Z20incrementArrayOnHostPdi: .LFB2057: .cfi_startproc endbr64 testl %esi, %esi jle .L3 movq %rdi, %rax movslq %esi, %rsi leaq (%rdi,%rsi,8), %rdx movsd .LC0(%rip), %xmm1 .L5: movapd %xmm1, %xmm0 addsd (%rax), %xmm0 movsd %xmm0, (%rax) addq $8, %rax cmpq %rdx, %rax jne .L5 .L3: ret .cfi_endproc .LFE2057: .size _Z20incrementArrayOnHostPdi, .-_Z20incrementArrayOnHostPdi .globl _Z43__device_stub__Z22incrementArrayOnDevicePdiPdi .type _Z43__device_stub__Z22incrementArrayOnDevicePdiPdi, @function _Z43__device_stub__Z22incrementArrayOnDevicePdiPdi: .LFB2083: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L11 .L7: movq 104(%rsp), %rax subq %fs:40, %rax jne .L12 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L11: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z22incrementArrayOnDevicePdi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L7 
.L12: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z43__device_stub__Z22incrementArrayOnDevicePdiPdi, .-_Z43__device_stub__Z22incrementArrayOnDevicePdiPdi .globl _Z22incrementArrayOnDevicePdi .type _Z22incrementArrayOnDevicePdi, @function _Z22incrementArrayOnDevicePdi: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z43__device_stub__Z22incrementArrayOnDevicePdiPdi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z22incrementArrayOnDevicePdi, .-_Z22incrementArrayOnDevicePdi .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "N = %d\n" .LC3: .string "%d %d\n" .LC4: .string "i=%d %g %g\n" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC5: .string "Je\305\274eli widzisz ten napis, to program dzia\305\202a poprawnie\n" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $56, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movl $100000000, %edx leaq .LC1(%rip), %rsi movl $2, %edi call __printf_chk@PLT movl $800000000, %edi call _Znam@PLT movq %rax, %rbp movl $800000000, %edi call _Znam@PLT movq %rax, %r12 leaq 8(%rsp), %rdi movl $800000000, %esi call cudaMalloc@PLT movl $0, %eax movsd .LC0(%rip), %xmm1 .L16: pxor %xmm0, %xmm0 cvtsi2sdl %eax, %xmm0 addsd %xmm1, %xmm0 movsd %xmm0, 0(%rbp,%rax,8) movq $0x000000000, (%r12,%rax,8) addq $1, %rax cmpq $100000000, %rax jne .L16 movl $4, %ecx movl $800000000, %edx movq %rbp, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $100000000, %esi movq %rbp, %rdi call _Z20incrementArrayOnHostPdi movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 36(%rsp) movl $3, %ecx movl $65535, %edx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $65535, 28(%rsp) movl $3, 32(%rsp) 
movl $512, 16(%rsp) movl 24(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 16(%rsp), %rdx movq 28(%rsp), %rdi movl 36(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L25 .L17: call cudaDeviceSynchronize@PLT movl $4, %ecx movl $800000000, %edx movq 8(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movl $0, %ebx movl $1, %r13d jmp .L21 .L25: movl $100000000, %esi movq 8(%rsp), %rdi call _Z43__device_stub__Z22incrementArrayOnDevicePdiPdi jmp .L17 .L18: addq $1, %rbx cmpq $100000000, %rbx je .L26 .L21: movsd 0(%rbp,%rbx,8), %xmm0 movsd (%r12,%rbx,8), %xmm1 cmpl $99, %ebx jg .L18 ucomisd %xmm1, %xmm0 setp %al cmovne %r13d, %eax testb %al, %al je .L18 movl %ebx, %edx leaq .LC4(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT addq $1, %rbx jmp .L21 .L26: movq %rbp, %rdi call _ZdaPv@PLT movq %r12, %rdi call _ZdaPv@PLT movq 8(%rsp), %rdi call cudaFree@PLT leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L27 movl $0, %eax addq $56, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L27: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size main, .-main .section .rodata.str1.1 .LC6: .string "_Z22incrementArrayOnDevicePdi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC6(%rip), %rdx movq %rdx, %rcx leaq _Z22incrementArrayOnDevicePdi(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq 
_ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC0: .long 0 .long 1072693248 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <assert.h> #include <cuda.h> // zwykła funkcja w C/C++ void incrementArrayOnHost(double *tab, int N) { for (int i=0; i < N; i++) tab[i] += 1.0; } // funkcja (tzw. kernel) działająca na GPU __global__ void incrementArrayOnDevice(double *tab, int N) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; // if (idx < N && blockIdx.y > 0) // printf (" %d %d %d %d %d %d \n", idx, gridDim.x, blockDim.x, N, blockIdx.x, threadIdx.x); if (idx<N) tab[idx] += 1.0; } int main(void) { const int N = 100000000; printf("N = %d\n", N); double *a_h, *b_h; // wskaźniki na pamięć na CPU (host) double *a_d; // wskaźnik na bufor w GPU (device) // przydział pamięci na CPU a_h = new double [N]; b_h = new double [N]; // przydział pamięci na GPU cudaMalloc((void **) &a_d, sizeof(double)*N); // inicjalizacja danych na CPU for (int i=0; i<N; i++) { a_h[i] = i + 1.0; b_h[i] = 0; } // przesłąnie danych na GPU: a_h -> a_d cudaMemcpy(a_d, a_h, sizeof(double)*N, cudaMemcpyDefault ); // robimy jakieś obliczenia na CPU incrementArrayOnHost(a_h, N); // a teraz próbujemy zrobić to samo na GPU dim3 blockSize = 512; dim3 gridSize (1,1,1); const int max_block_size = 65535; int nBlocks = N/blockSize.x + (N%blockSize.x == 0 ? 0 : 1); gridSize.y = 1 + nBlocks/max_block_size; gridSize.x = (nBlocks > max_block_size) ? max_block_size : nBlocks; printf("%d %d\n", gridSize.x, gridSize.y); // wywołujemy kernel na GPU incrementArrayOnDevice <<< gridSize, blockSize >>> (a_d, N); // kopiujemy wynik z GPU do CPU cudaDeviceSynchronize(); cudaMemcpy(b_h, a_d, sizeof(double)*N, cudaMemcpyDefault); // sprawdzamy wynik for (int i=0; i<N; i++) if (a_h[i] != b_h[i] && i < 100) { printf ("i=%d %g %g\n", i, a_h[i], b_h[i]); assert(a_h[i] == b_h[i]); } // sprzątamy delete [] a_h; delete [] b_h; cudaFree(a_d); printf("Jeżeli widzisz ten napis, to program działa poprawnie\n"); }
#include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> // zwykła funkcja w C/C++ void incrementArrayOnHost(double *tab, int N) { for (int i=0; i < N; i++) tab[i] += 1.0; } // funkcja (tzw. kernel) działająca na GPU __global__ void incrementArrayOnDevice(double *tab, int N) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; // if (idx < N && blockIdx.y > 0) // printf (" %d %d %d %d %d %d \n", idx, gridDim.x, blockDim.x, N, blockIdx.x, threadIdx.x); if (idx<N) tab[idx] += 1.0; } int main(void) { const int N = 100000000; printf("N = %d\n", N); double *a_h, *b_h; // wskaźniki na pamięć na CPU (host) double *a_d; // wskaźnik na bufor w GPU (device) // przydział pamięci na CPU a_h = new double [N]; b_h = new double [N]; // przydział pamięci na GPU hipMalloc((void **) &a_d, sizeof(double)*N); // inicjalizacja danych na CPU for (int i=0; i<N; i++) { a_h[i] = i + 1.0; b_h[i] = 0; } // przesłąnie danych na GPU: a_h -> a_d hipMemcpy(a_d, a_h, sizeof(double)*N, hipMemcpyDefault ); // robimy jakieś obliczenia na CPU incrementArrayOnHost(a_h, N); // a teraz próbujemy zrobić to samo na GPU dim3 blockSize = 512; dim3 gridSize (1,1,1); const int max_block_size = 65535; int nBlocks = N/blockSize.x + (N%blockSize.x == 0 ? 0 : 1); gridSize.y = 1 + nBlocks/max_block_size; gridSize.x = (nBlocks > max_block_size) ? max_block_size : nBlocks; printf("%d %d\n", gridSize.x, gridSize.y); // wywołujemy kernel na GPU incrementArrayOnDevice <<< gridSize, blockSize >>> (a_d, N); // kopiujemy wynik z GPU do CPU hipDeviceSynchronize(); hipMemcpy(b_h, a_d, sizeof(double)*N, hipMemcpyDefault); // sprawdzamy wynik for (int i=0; i<N; i++) if (a_h[i] != b_h[i] && i < 100) { printf ("i=%d %g %g\n", i, a_h[i], b_h[i]); assert(a_h[i] == b_h[i]); } // sprzątamy delete [] a_h; delete [] b_h; hipFree(a_d); printf("Jeżeli widzisz ten napis, to program działa poprawnie\n"); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> // zwykła funkcja w C/C++ void incrementArrayOnHost(double *tab, int N) { for (int i=0; i < N; i++) tab[i] += 1.0; } // funkcja (tzw. kernel) działająca na GPU __global__ void incrementArrayOnDevice(double *tab, int N) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; // if (idx < N && blockIdx.y > 0) // printf (" %d %d %d %d %d %d \n", idx, gridDim.x, blockDim.x, N, blockIdx.x, threadIdx.x); if (idx<N) tab[idx] += 1.0; } int main(void) { const int N = 100000000; printf("N = %d\n", N); double *a_h, *b_h; // wskaźniki na pamięć na CPU (host) double *a_d; // wskaźnik na bufor w GPU (device) // przydział pamięci na CPU a_h = new double [N]; b_h = new double [N]; // przydział pamięci na GPU hipMalloc((void **) &a_d, sizeof(double)*N); // inicjalizacja danych na CPU for (int i=0; i<N; i++) { a_h[i] = i + 1.0; b_h[i] = 0; } // przesłąnie danych na GPU: a_h -> a_d hipMemcpy(a_d, a_h, sizeof(double)*N, hipMemcpyDefault ); // robimy jakieś obliczenia na CPU incrementArrayOnHost(a_h, N); // a teraz próbujemy zrobić to samo na GPU dim3 blockSize = 512; dim3 gridSize (1,1,1); const int max_block_size = 65535; int nBlocks = N/blockSize.x + (N%blockSize.x == 0 ? 0 : 1); gridSize.y = 1 + nBlocks/max_block_size; gridSize.x = (nBlocks > max_block_size) ? max_block_size : nBlocks; printf("%d %d\n", gridSize.x, gridSize.y); // wywołujemy kernel na GPU incrementArrayOnDevice <<< gridSize, blockSize >>> (a_d, N); // kopiujemy wynik z GPU do CPU hipDeviceSynchronize(); hipMemcpy(b_h, a_d, sizeof(double)*N, hipMemcpyDefault); // sprawdzamy wynik for (int i=0; i<N; i++) if (a_h[i] != b_h[i] && i < 100) { printf ("i=%d %g %g\n", i, a_h[i], b_h[i]); assert(a_h[i] == b_h[i]); } // sprzątamy delete [] a_h; delete [] b_h; hipFree(a_d); printf("Jeżeli widzisz ten napis, to program działa poprawnie\n"); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z22incrementArrayOnDevicePdi .globl _Z22incrementArrayOnDevicePdi .p2align 8 .type _Z22incrementArrayOnDevicePdi,@function _Z22incrementArrayOnDevicePdi: s_clause 0x2 s_load_b32 s2, s[0:1], 0x10 s_load_b32 s3, s[0:1], 0x1c s_load_b32 s4, s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_mul_i32 s2, s2, s15 s_and_b32 s3, s3, 0xffff s_add_i32 s2, s2, s14 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s4, v1 s_cbranch_execz .LBB0_2 s_load_b64 s[0:1], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 3, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_load_b64 v[2:3], v[0:1], off s_waitcnt vmcnt(0) v_add_f64 v[2:3], v[2:3], 1.0 global_store_b64 v[0:1], v[2:3], off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z22incrementArrayOnDevicePdi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 
.amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z22incrementArrayOnDevicePdi, .Lfunc_end0-_Z22incrementArrayOnDevicePdi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: 
_Z22incrementArrayOnDevicePdi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z22incrementArrayOnDevicePdi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> // zwykła funkcja w C/C++ void incrementArrayOnHost(double *tab, int N) { for (int i=0; i < N; i++) tab[i] += 1.0; } // funkcja (tzw. kernel) działająca na GPU __global__ void incrementArrayOnDevice(double *tab, int N) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; // if (idx < N && blockIdx.y > 0) // printf (" %d %d %d %d %d %d \n", idx, gridDim.x, blockDim.x, N, blockIdx.x, threadIdx.x); if (idx<N) tab[idx] += 1.0; } int main(void) { const int N = 100000000; printf("N = %d\n", N); double *a_h, *b_h; // wskaźniki na pamięć na CPU (host) double *a_d; // wskaźnik na bufor w GPU (device) // przydział pamięci na CPU a_h = new double [N]; b_h = new double [N]; // przydział pamięci na GPU hipMalloc((void **) &a_d, sizeof(double)*N); // inicjalizacja danych na CPU for (int i=0; i<N; i++) { a_h[i] = i + 1.0; b_h[i] = 0; } // przesłąnie danych na GPU: a_h -> a_d hipMemcpy(a_d, a_h, sizeof(double)*N, hipMemcpyDefault ); // robimy jakieś obliczenia na CPU incrementArrayOnHost(a_h, N); // a teraz próbujemy zrobić to samo na GPU dim3 blockSize = 512; dim3 gridSize (1,1,1); const int max_block_size = 65535; int nBlocks = N/blockSize.x + (N%blockSize.x == 0 ? 0 : 1); gridSize.y = 1 + nBlocks/max_block_size; gridSize.x = (nBlocks > max_block_size) ? max_block_size : nBlocks; printf("%d %d\n", gridSize.x, gridSize.y); // wywołujemy kernel na GPU incrementArrayOnDevice <<< gridSize, blockSize >>> (a_d, N); // kopiujemy wynik z GPU do CPU hipDeviceSynchronize(); hipMemcpy(b_h, a_d, sizeof(double)*N, hipMemcpyDefault); // sprawdzamy wynik for (int i=0; i<N; i++) if (a_h[i] != b_h[i] && i < 100) { printf ("i=%d %g %g\n", i, a_h[i], b_h[i]); assert(a_h[i] == b_h[i]); } // sprzątamy delete [] a_h; delete [] b_h; hipFree(a_d); printf("Jeżeli widzisz ten napis, to program działa poprawnie\n"); }
.text .file "4.hip" .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z20incrementArrayOnHostPdi .LCPI0_0: .quad 0x3ff0000000000000 # double 1 .text .globl _Z20incrementArrayOnHostPdi .p2align 4, 0x90 .type _Z20incrementArrayOnHostPdi,@function _Z20incrementArrayOnHostPdi: # @_Z20incrementArrayOnHostPdi .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB0_3 # %bb.1: # %.lr.ph.preheader movl %esi, %eax xorl %ecx, %ecx movsd .LCPI0_0(%rip), %xmm0 # xmm0 = mem[0],zero .p2align 4, 0x90 .LBB0_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movsd (%rdi,%rcx,8), %xmm1 # xmm1 = mem[0],zero addsd %xmm0, %xmm1 movsd %xmm1, (%rdi,%rcx,8) incq %rcx cmpq %rcx, %rax jne .LBB0_2 .LBB0_3: # %._crit_edge retq .Lfunc_end0: .size _Z20incrementArrayOnHostPdi, .Lfunc_end0-_Z20incrementArrayOnHostPdi .cfi_endproc # -- End function .globl _Z37__device_stub__incrementArrayOnDevicePdi # -- Begin function _Z37__device_stub__incrementArrayOnDevicePdi .p2align 4, 0x90 .type _Z37__device_stub__incrementArrayOnDevicePdi,@function _Z37__device_stub__incrementArrayOnDevicePdi: # @_Z37__device_stub__incrementArrayOnDevicePdi .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z22incrementArrayOnDevicePdi, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end1: .size _Z37__device_stub__incrementArrayOnDevicePdi, .Lfunc_end1-_Z37__device_stub__incrementArrayOnDevicePdi .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI2_0: .quad 0x3ff0000000000000 # double 1 
.text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $96, %rsp .cfi_def_cfa_offset 128 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 xorl %r15d, %r15d movl $.L.str, %edi movl $100000000, %esi # imm = 0x5F5E100 xorl %eax, %eax callq printf movl $800000000, %edi # imm = 0x2FAF0800 callq _Znam movq %rax, %rbx movl $800000000, %edi # imm = 0x2FAF0800 callq _Znam movq %rax, %r14 leaq 8(%rsp), %rdi movl $800000000, %esi # imm = 0x2FAF0800 callq hipMalloc movl $800000000, %edx # imm = 0x2FAF0800 movq %r14, %rdi xorl %esi, %esi callq memset@PLT .p2align 4, 0x90 .LBB2_1: # =>This Inner Loop Header: Depth=1 leaq 1(%r15), %rax xorps %xmm0, %xmm0 cvtsi2sd %eax, %xmm0 movsd %xmm0, (%rbx,%r15,8) movq %rax, %r15 cmpq $100000000, %rax # imm = 0x5F5E100 jne .LBB2_1 # %bb.2: movq 8(%rsp), %rdi movl $800000000, %edx # imm = 0x2FAF0800 movq %rbx, %rsi movl $4, %ecx callq hipMemcpy xorl %eax, %eax movsd .LCPI2_0(%rip), %xmm0 # xmm0 = mem[0],zero .p2align 4, 0x90 .LBB2_3: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 movsd (%rbx,%rax,8), %xmm1 # xmm1 = mem[0],zero addsd %xmm0, %xmm1 movsd %xmm1, (%rbx,%rax,8) incq %rax cmpq $100000000, %rax # imm = 0x5F5E100 jne .LBB2_3 # %bb.4: # %_Z20incrementArrayOnHostPdi.exit movl $.L.str.1, %edi movl $65535, %esi # imm = 0xFFFF movl $3, %edx xorl %eax, %eax callq printf movabsq $12884967423, %rdi # imm = 0x30000FFFF movabsq $4294967808, %rdx # imm = 0x100000200 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_6 # %bb.5: movq 8(%rsp), %rax movq %rax, 72(%rsp) movl $100000000, 20(%rsp) # imm = 0x5F5E100 leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 20(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi 
movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z22incrementArrayOnDevicePdi, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_6: callq hipDeviceSynchronize movq 8(%rsp), %rsi movl $800000000, %edx # imm = 0x2FAF0800 movq %r14, %rdi movl $4, %ecx callq hipMemcpy xorl %r15d, %r15d jmp .LBB2_7 .p2align 4, 0x90 .LBB2_10: # in Loop: Header=BB2_7 Depth=1 incq %r15 cmpq $100000000, %r15 # imm = 0x5F5E100 je .LBB2_11 .LBB2_7: # =>This Inner Loop Header: Depth=1 cmpq $99, %r15 ja .LBB2_10 # %bb.8: # in Loop: Header=BB2_7 Depth=1 movsd (%rbx,%r15,8), %xmm0 # xmm0 = mem[0],zero movsd (%r14,%r15,8), %xmm1 # xmm1 = mem[0],zero ucomisd %xmm1, %xmm0 jne .LBB2_9 jnp .LBB2_10 .LBB2_9: # in Loop: Header=BB2_7 Depth=1 movl $.L.str.2, %edi movl %r15d, %esi movb $2, %al callq printf jmp .LBB2_10 .LBB2_11: movq %rbx, %rdi callq _ZdaPv movq %r14, %rdi callq _ZdaPv movq 8(%rsp), %rdi callq hipFree movl $.Lstr, %edi callq puts@PLT xorl %eax, %eax addq $96, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z22incrementArrayOnDevicePdi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL 
.Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z22incrementArrayOnDevicePdi,@object # @_Z22incrementArrayOnDevicePdi .section .rodata,"a",@progbits .globl _Z22incrementArrayOnDevicePdi .p2align 3, 0x0 _Z22incrementArrayOnDevicePdi: .quad _Z37__device_stub__incrementArrayOnDevicePdi .size _Z22incrementArrayOnDevicePdi, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "N = %d\n" .size .L.str, 8 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "%d %d\n" .size .L.str.1, 7 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "i=%d %g %g\n" .size .L.str.2, 12 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z22incrementArrayOnDevicePdi" .size .L__unnamed_1, 30 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Je\305\274eli widzisz ten napis, to program dzia\305\202a poprawnie" .size .Lstr, 56 .section 
".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z37__device_stub__incrementArrayOnDevicePdi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z22incrementArrayOnDevicePdi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z22incrementArrayOnDevicePdi .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */ /* 0x000e280000002600 */ /*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */ /* 0x000e280000002500 */ /*0030*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e620000002100 */ /*0040*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */ /* 0x001fc800078e0203 */ /*0050*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */ /* 0x002fca00078e0205 */ /*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x168], PT ; /* 0x00005a0000007a0c */ /* 0x000fda0003f06270 */ /*0070*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0080*/ HFMA2.MMA R3, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff037435 */ /* 0x000fe200000001ff */ /*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*00a0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fca00078e0203 */ /*00b0*/ LDG.E.64 R4, [R2.64] ; /* 0x0000000402047981 */ /* 0x000ea4000c1e1b00 */ /*00c0*/ DADD R4, R4, 1 ; /* 0x3ff0000004047429 */ /* 0x004e0e0000000000 */ /*00d0*/ STG.E.64 [R2.64], R4 ; /* 0x0000000402007986 */ /* 0x001fe2000c101b04 */ /*00e0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 
0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z22incrementArrayOnDevicePdi .globl _Z22incrementArrayOnDevicePdi .p2align 8 .type _Z22incrementArrayOnDevicePdi,@function _Z22incrementArrayOnDevicePdi: s_clause 0x2 s_load_b32 s2, s[0:1], 0x10 s_load_b32 s3, s[0:1], 0x1c s_load_b32 s4, s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_mul_i32 s2, s2, s15 s_and_b32 s3, s3, 0xffff s_add_i32 s2, s2, s14 s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1] s_mov_b32 s2, exec_lo v_cmpx_gt_i32_e64 s4, v1 s_cbranch_execz .LBB0_2 s_load_b64 s[0:1], s[0:1], 0x0 v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[0:1], 3, v[1:2] s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo global_load_b64 v[2:3], v[0:1], off s_waitcnt vmcnt(0) v_add_f64 v[2:3], v[2:3], 1.0 global_store_b64 v[0:1], v[2:3], off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z22incrementArrayOnDevicePdi .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 
.amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z22incrementArrayOnDevicePdi, .Lfunc_end0-_Z22incrementArrayOnDevicePdi .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: 
_Z22incrementArrayOnDevicePdi .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z22incrementArrayOnDevicePdi.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00024dc6_00000000-6_4.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z20incrementArrayOnHostPdi .type _Z20incrementArrayOnHostPdi, @function _Z20incrementArrayOnHostPdi: .LFB2057: .cfi_startproc endbr64 testl %esi, %esi jle .L3 movq %rdi, %rax movslq %esi, %rsi leaq (%rdi,%rsi,8), %rdx movsd .LC0(%rip), %xmm1 .L5: movapd %xmm1, %xmm0 addsd (%rax), %xmm0 movsd %xmm0, (%rax) addq $8, %rax cmpq %rdx, %rax jne .L5 .L3: ret .cfi_endproc .LFE2057: .size _Z20incrementArrayOnHostPdi, .-_Z20incrementArrayOnHostPdi .globl _Z43__device_stub__Z22incrementArrayOnDevicePdiPdi .type _Z43__device_stub__Z22incrementArrayOnDevicePdiPdi, @function _Z43__device_stub__Z22incrementArrayOnDevicePdiPdi: .LFB2083: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L11 .L7: movq 104(%rsp), %rax subq %fs:40, %rax jne .L12 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L11: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z22incrementArrayOnDevicePdi(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L7 
.L12: call __stack_chk_fail@PLT .cfi_endproc .LFE2083: .size _Z43__device_stub__Z22incrementArrayOnDevicePdiPdi, .-_Z43__device_stub__Z22incrementArrayOnDevicePdiPdi .globl _Z22incrementArrayOnDevicePdi .type _Z22incrementArrayOnDevicePdi, @function _Z22incrementArrayOnDevicePdi: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z43__device_stub__Z22incrementArrayOnDevicePdiPdi addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _Z22incrementArrayOnDevicePdi, .-_Z22incrementArrayOnDevicePdi .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "N = %d\n" .LC3: .string "%d %d\n" .LC4: .string "i=%d %g %g\n" .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC5: .string "Je\305\274eli widzisz ten napis, to program dzia\305\202a poprawnie\n" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 pushq %r13 .cfi_def_cfa_offset 16 .cfi_offset 13, -16 pushq %r12 .cfi_def_cfa_offset 24 .cfi_offset 12, -24 pushq %rbp .cfi_def_cfa_offset 32 .cfi_offset 6, -32 pushq %rbx .cfi_def_cfa_offset 40 .cfi_offset 3, -40 subq $56, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movl $100000000, %edx leaq .LC1(%rip), %rsi movl $2, %edi call __printf_chk@PLT movl $800000000, %edi call _Znam@PLT movq %rax, %rbp movl $800000000, %edi call _Znam@PLT movq %rax, %r12 leaq 8(%rsp), %rdi movl $800000000, %esi call cudaMalloc@PLT movl $0, %eax movsd .LC0(%rip), %xmm1 .L16: pxor %xmm0, %xmm0 cvtsi2sdl %eax, %xmm0 addsd %xmm1, %xmm0 movsd %xmm0, 0(%rbp,%rax,8) movq $0x000000000, (%r12,%rax,8) addq $1, %rax cmpq $100000000, %rax jne .L16 movl $4, %ecx movl $800000000, %edx movq %rbp, %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $100000000, %esi movq %rbp, %rdi call _Z20incrementArrayOnHostPdi movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 36(%rsp) movl $3, %ecx movl $65535, %edx leaq .LC3(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $65535, 28(%rsp) movl $3, 32(%rsp) 
movl $512, 16(%rsp) movl 24(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 16(%rsp), %rdx movq 28(%rsp), %rdi movl 36(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L25 .L17: call cudaDeviceSynchronize@PLT movl $4, %ecx movl $800000000, %edx movq 8(%rsp), %rsi movq %r12, %rdi call cudaMemcpy@PLT movl $0, %ebx movl $1, %r13d jmp .L21 .L25: movl $100000000, %esi movq 8(%rsp), %rdi call _Z43__device_stub__Z22incrementArrayOnDevicePdiPdi jmp .L17 .L18: addq $1, %rbx cmpq $100000000, %rbx je .L26 .L21: movsd 0(%rbp,%rbx,8), %xmm0 movsd (%r12,%rbx,8), %xmm1 cmpl $99, %ebx jg .L18 ucomisd %xmm1, %xmm0 setp %al cmovne %r13d, %eax testb %al, %al je .L18 movl %ebx, %edx leaq .LC4(%rip), %rsi movl $2, %edi movl $2, %eax call __printf_chk@PLT addq $1, %rbx jmp .L21 .L26: movq %rbp, %rdi call _ZdaPv@PLT movq %r12, %rdi call _ZdaPv@PLT movq 8(%rsp), %rdi call cudaFree@PLT leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L27 movl $0, %eax addq $56, %rsp .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %rbp .cfi_def_cfa_offset 24 popq %r12 .cfi_def_cfa_offset 16 popq %r13 .cfi_def_cfa_offset 8 ret .L27: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE2058: .size main, .-main .section .rodata.str1.1 .LC6: .string "_Z22incrementArrayOnDevicePdi" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC6(%rip), %rdx movq %rdx, %rcx leaq _Z22incrementArrayOnDevicePdi(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq 
_ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC0: .long 0 .long 1072693248 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "4.hip" .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z20incrementArrayOnHostPdi .LCPI0_0: .quad 0x3ff0000000000000 # double 1 .text .globl _Z20incrementArrayOnHostPdi .p2align 4, 0x90 .type _Z20incrementArrayOnHostPdi,@function _Z20incrementArrayOnHostPdi: # @_Z20incrementArrayOnHostPdi .cfi_startproc # %bb.0: testl %esi, %esi jle .LBB0_3 # %bb.1: # %.lr.ph.preheader movl %esi, %eax xorl %ecx, %ecx movsd .LCPI0_0(%rip), %xmm0 # xmm0 = mem[0],zero .p2align 4, 0x90 .LBB0_2: # %.lr.ph # =>This Inner Loop Header: Depth=1 movsd (%rdi,%rcx,8), %xmm1 # xmm1 = mem[0],zero addsd %xmm0, %xmm1 movsd %xmm1, (%rdi,%rcx,8) incq %rcx cmpq %rcx, %rax jne .LBB0_2 .LBB0_3: # %._crit_edge retq .Lfunc_end0: .size _Z20incrementArrayOnHostPdi, .Lfunc_end0-_Z20incrementArrayOnHostPdi .cfi_endproc # -- End function .globl _Z37__device_stub__incrementArrayOnDevicePdi # -- Begin function _Z37__device_stub__incrementArrayOnDevicePdi .p2align 4, 0x90 .type _Z37__device_stub__incrementArrayOnDevicePdi,@function _Z37__device_stub__incrementArrayOnDevicePdi: # @_Z37__device_stub__incrementArrayOnDevicePdi .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z22incrementArrayOnDevicePdi, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end1: .size _Z37__device_stub__incrementArrayOnDevicePdi, .Lfunc_end1-_Z37__device_stub__incrementArrayOnDevicePdi .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function main .LCPI2_0: .quad 0x3ff0000000000000 # double 1 
.text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $96, %rsp .cfi_def_cfa_offset 128 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 xorl %r15d, %r15d movl $.L.str, %edi movl $100000000, %esi # imm = 0x5F5E100 xorl %eax, %eax callq printf movl $800000000, %edi # imm = 0x2FAF0800 callq _Znam movq %rax, %rbx movl $800000000, %edi # imm = 0x2FAF0800 callq _Znam movq %rax, %r14 leaq 8(%rsp), %rdi movl $800000000, %esi # imm = 0x2FAF0800 callq hipMalloc movl $800000000, %edx # imm = 0x2FAF0800 movq %r14, %rdi xorl %esi, %esi callq memset@PLT .p2align 4, 0x90 .LBB2_1: # =>This Inner Loop Header: Depth=1 leaq 1(%r15), %rax xorps %xmm0, %xmm0 cvtsi2sd %eax, %xmm0 movsd %xmm0, (%rbx,%r15,8) movq %rax, %r15 cmpq $100000000, %rax # imm = 0x5F5E100 jne .LBB2_1 # %bb.2: movq 8(%rsp), %rdi movl $800000000, %edx # imm = 0x2FAF0800 movq %rbx, %rsi movl $4, %ecx callq hipMemcpy xorl %eax, %eax movsd .LCPI2_0(%rip), %xmm0 # xmm0 = mem[0],zero .p2align 4, 0x90 .LBB2_3: # %.lr.ph.i # =>This Inner Loop Header: Depth=1 movsd (%rbx,%rax,8), %xmm1 # xmm1 = mem[0],zero addsd %xmm0, %xmm1 movsd %xmm1, (%rbx,%rax,8) incq %rax cmpq $100000000, %rax # imm = 0x5F5E100 jne .LBB2_3 # %bb.4: # %_Z20incrementArrayOnHostPdi.exit movl $.L.str.1, %edi movl $65535, %esi # imm = 0xFFFF movl $3, %edx xorl %eax, %eax callq printf movabsq $12884967423, %rdi # imm = 0x30000FFFF movabsq $4294967808, %rdx # imm = 0x100000200 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB2_6 # %bb.5: movq 8(%rsp), %rax movq %rax, 72(%rsp) movl $100000000, 20(%rsp) # imm = 0x5F5E100 leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 20(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), %rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi 
movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z22incrementArrayOnDevicePdi, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB2_6: callq hipDeviceSynchronize movq 8(%rsp), %rsi movl $800000000, %edx # imm = 0x2FAF0800 movq %r14, %rdi movl $4, %ecx callq hipMemcpy xorl %r15d, %r15d jmp .LBB2_7 .p2align 4, 0x90 .LBB2_10: # in Loop: Header=BB2_7 Depth=1 incq %r15 cmpq $100000000, %r15 # imm = 0x5F5E100 je .LBB2_11 .LBB2_7: # =>This Inner Loop Header: Depth=1 cmpq $99, %r15 ja .LBB2_10 # %bb.8: # in Loop: Header=BB2_7 Depth=1 movsd (%rbx,%r15,8), %xmm0 # xmm0 = mem[0],zero movsd (%r14,%r15,8), %xmm1 # xmm1 = mem[0],zero ucomisd %xmm1, %xmm0 jne .LBB2_9 jnp .LBB2_10 .LBB2_9: # in Loop: Header=BB2_7 Depth=1 movl $.L.str.2, %edi movl %r15d, %esi movb $2, %al callq printf jmp .LBB2_10 .LBB2_11: movq %rbx, %rdi callq _ZdaPv movq %r14, %rdi callq _ZdaPv movq 8(%rsp), %rdi callq hipFree movl $.Lstr, %edi callq puts@PLT xorl %eax, %eax addq $96, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .Lfunc_end2: .size main, .Lfunc_end2-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB3_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB3_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z22incrementArrayOnDevicePdi, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL 
.Lfunc_end3: .size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB4_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB4_2: retq .Lfunc_end4: .size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor .cfi_endproc # -- End function .type _Z22incrementArrayOnDevicePdi,@object # @_Z22incrementArrayOnDevicePdi .section .rodata,"a",@progbits .globl _Z22incrementArrayOnDevicePdi .p2align 3, 0x0 _Z22incrementArrayOnDevicePdi: .quad _Z37__device_stub__incrementArrayOnDevicePdi .size _Z22incrementArrayOnDevicePdi, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "N = %d\n" .size .L.str, 8 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "%d %d\n" .size .L.str.1, 7 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "i=%d %g %g\n" .size .L.str.2, 12 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z22incrementArrayOnDevicePdi" .size .L__unnamed_1, 30 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Je\305\274eli widzisz ten napis, to program dzia\305\202a poprawnie" .size .Lstr, 56 .section 
".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z37__device_stub__incrementArrayOnDevicePdi .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z22incrementArrayOnDevicePdi .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
template<typename T> __device__ void columnsIndices(const T* matrix, int* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; if (row < rows && col < cols) { int ij = row * cols + col; result[ij] = col; } } template<typename T> __device__ void rowsIndices(const T* matrix, int* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; if (row < rows && col < cols) { int ij = row * cols + col; result[ij] = row; } }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
template<typename T> __device__ void columnsIndices(const T* matrix, int* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; if (row < rows && col < cols) { int ij = row * cols + col; result[ij] = col; } } template<typename T> __device__ void rowsIndices(const T* matrix, int* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; if (row < rows && col < cols) { int ij = row * cols + col; result[ij] = row; } }
.file "tmpxft_00009a50_00000000-6_Indices.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2031: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2031: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
template<typename T> __device__ void columnsIndices(const T* matrix, int* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; if (row < rows && col < cols) { int ij = row * cols + col; result[ij] = col; } } template<typename T> __device__ void rowsIndices(const T* matrix, int* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; if (row < rows && col < cols) { int ij = row * cols + col; result[ij] = row; } }
#include <hip/hip_runtime.h> template<typename T> __device__ void columnsIndices(const T* matrix, int* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; if (row < rows && col < cols) { int ij = row * cols + col; result[ij] = col; } } template<typename T> __device__ void rowsIndices(const T* matrix, int* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; if (row < rows && col < cols) { int ij = row * cols + col; result[ij] = row; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> template<typename T> __device__ void columnsIndices(const T* matrix, int* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; if (row < rows && col < cols) { int ij = row * cols + col; result[ij] = col; } } template<typename T> __device__ void rowsIndices(const T* matrix, int* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; if (row < rows && col < cols) { int ij = row * cols + col; result[ij] = row; } }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> template<typename T> __device__ void columnsIndices(const T* matrix, int* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; if (row < rows && col < cols) { int ij = row * cols + col; result[ij] = col; } } template<typename T> __device__ void rowsIndices(const T* matrix, int* result, const int rows, const int cols) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; if (row < rows && col < cols) { int ij = row * cols + col; result[ij] = row; } }
.text .file "Indices.hip" .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00009a50_00000000-6_Indices.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2031: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2031: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "Indices.hip" .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __global__ void readOffsetUnroll2(float *A, float *B, float *C, const int n, int offset) { unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x; unsigned int k = i + offset; if (k < n) C[i] = A[k] + B[k]; if (k + blockDim.x < n) { C[i + blockDim.x] = A[k + blockDim.x] + B[k + blockDim.x]; } }
code for sm_80 Function : _Z17readOffsetUnroll2PfS_S_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0020*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */ /* 0x000fe20000000800 */ /*0030*/ BSSY B0, 0x160 ; /* 0x0000012000007945 */ /* 0x000fe20003800000 */ /*0040*/ USHF.L.U32 UR4, UR4, 0x1, URZ ; /* 0x0000000104047899 */ /* 0x000fe2000800063f */ /*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e2a0000002100 */ /*0060*/ IMAD R0, R0, UR4, R3 ; /* 0x0000000400007c24 */ /* 0x001fe2000f8e0203 */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc80000000a00 */ /*0080*/ IADD3 R2, R0, c[0x0][0x17c], RZ ; /* 0x00005f0000027a10 */ /* 0x000fc80007ffe0ff */ /*0090*/ ISETP.GE.U32.AND P0, PT, R2.reuse, c[0x0][0x178], PT ; /* 0x00005e0002007a0c */ /* 0x040fe40003f06070 */ /*00a0*/ IADD3 R8, R2, c[0x0][0x0], RZ ; /* 0x0000000002087a10 */ /* 0x000fc80007ffe0ff */ /*00b0*/ ISETP.GE.U32.AND P1, PT, R8, c[0x0][0x178], PT ; /* 0x00005e0008007a0c */ /* 0x000fce0003f26070 */ /*00c0*/ @P0 BRA 0x150 ; /* 0x0000008000000947 */ /* 0x000fea0003800000 */ /*00d0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fd400000001ff */ /*00e0*/ IMAD.WIDE.U32 R4, R2, R7, c[0x0][0x168] ; /* 0x00005a0002047625 */ /* 0x000fc800078e0007 */ /*00f0*/ IMAD.WIDE.U32 R2, R2, R7.reuse, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x080fe400078e0007 */ /*0100*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1900 */ /*0110*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*0120*/ IMAD.WIDE.U32 R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */ /* 0x000fc800078e0007 */ /*0130*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */ /* 0x004fca0000000000 */ /*0140*/ STG.E [R6.64], R9 ; /* 
0x0000000906007986 */ /* 0x0001e4000c101904 */ /*0150*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0160*/ @P1 EXIT ; /* 0x000000000000194d */ /* 0x000fea0003800000 */ /*0170*/ MOV R7, 0x4 ; /* 0x0000000400077802 */ /* 0x001fca0000000f00 */ /*0180*/ IMAD.WIDE.U32 R2, R8, R7, c[0x0][0x160] ; /* 0x0000580008027625 */ /* 0x000fc800078e0007 */ /*0190*/ IMAD.WIDE.U32 R4, R8, R7, c[0x0][0x168] ; /* 0x00005a0008047625 */ /* 0x000fe400078e0007 */ /*01a0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea8000c1e1900 */ /*01b0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1900 */ /*01c0*/ IADD3 R6, R0, c[0x0][0x0], RZ ; /* 0x0000000000067a10 */ /* 0x000fca0007ffe0ff */ /*01d0*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fc800078e0007 */ /*01e0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */ /* 0x004fca0000000000 */ /*01f0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*0200*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0210*/ BRA 0x210; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0280*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0290*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ 
..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void readOffsetUnroll2(float *A, float *B, float *C, const int n, int offset) { unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x; unsigned int k = i + offset; if (k < n) C[i] = A[k] + B[k]; if (k + blockDim.x < n) { C[i + blockDim.x] = A[k + blockDim.x] + B[k + blockDim.x]; } }
.file "tmpxft_00050814_00000000-6_readOffsetUnroll2.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z43__device_stub__Z17readOffsetUnroll2PfS_S_iiPfS_S_ii .type _Z43__device_stub__Z17readOffsetUnroll2PfS_S_iiPfS_S_ii, @function _Z43__device_stub__Z17readOffsetUnroll2PfS_S_iiPfS_S_ii: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z17readOffsetUnroll2PfS_S_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z43__device_stub__Z17readOffsetUnroll2PfS_S_iiPfS_S_ii, .-_Z43__device_stub__Z17readOffsetUnroll2PfS_S_iiPfS_S_ii .globl _Z17readOffsetUnroll2PfS_S_ii .type 
_Z17readOffsetUnroll2PfS_S_ii, @function _Z17readOffsetUnroll2PfS_S_ii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z43__device_stub__Z17readOffsetUnroll2PfS_S_iiPfS_S_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z17readOffsetUnroll2PfS_S_ii, .-_Z17readOffsetUnroll2PfS_S_ii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z17readOffsetUnroll2PfS_S_ii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z17readOffsetUnroll2PfS_S_ii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void readOffsetUnroll2(float *A, float *B, float *C, const int n, int offset) { unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x; unsigned int k = i + offset; if (k < n) C[i] = A[k] + B[k]; if (k + blockDim.x < n) { C[i + blockDim.x] = A[k + blockDim.x] + B[k + blockDim.x]; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void readOffsetUnroll2(float *A, float *B, float *C, const int n, int offset) { unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x; unsigned int k = i + offset; if (k < n) C[i] = A[k] + B[k]; if (k + blockDim.x < n) { C[i + blockDim.x] = A[k + blockDim.x] + B[k + blockDim.x]; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void readOffsetUnroll2(float *A, float *B, float *C, const int n, int offset) { unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x; unsigned int k = i + offset; if (k < n) C[i] = A[k] + B[k]; if (k + blockDim.x < n) { C[i + blockDim.x] = A[k + blockDim.x] + B[k + blockDim.x]; } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z17readOffsetUnroll2PfS_S_ii .globl _Z17readOffsetUnroll2PfS_S_ii .p2align 8 .type _Z17readOffsetUnroll2PfS_S_ii,@function _Z17readOffsetUnroll2PfS_S_ii: s_clause 0x1 s_load_b32 s8, s[0:1], 0x2c s_load_b256 s[0:7], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s8, s8, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_mul_i32 s15, s15, s8 v_lshl_add_u32 v0, s15, 1, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v2, s7, v0 s_mov_b32 s7, exec_lo v_cmpx_gt_u32_e64 s6, v2 s_cbranch_execz .LBB0_2 v_mov_b32_e32 v3, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_lshlrev_b64 v[4:5], 2, v[2:3] v_mov_b32_e32 v1, v3 v_add_co_u32 v6, vcc_lo, s0, v4 s_delay_alu instid0(VALU_DEP_3) v_add_co_ci_u32_e32 v7, vcc_lo, s1, v5, vcc_lo v_add_co_u32 v4, vcc_lo, s2, v4 v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo global_load_b32 v6, v[6:7], off global_load_b32 v5, v[4:5], off v_lshlrev_b64 v[3:4], 2, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v3, vcc_lo, s4, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo s_waitcnt vmcnt(0) v_add_f32_e32 v1, v6, v5 global_store_b32 v[3:4], v1, off .LBB0_2: s_or_b32 exec_lo, exec_lo, s7 v_add_nc_u32_e32 v1, s8, v2 s_delay_alu instid0(VALU_DEP_1) v_cmp_gt_u32_e32 vcc_lo, s6, v1 s_and_saveexec_b32 s6, vcc_lo s_cbranch_execz .LBB0_4 v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[3:4], 2, v[1:2] v_add_nc_u32_e32 v1, s8, v0 v_lshlrev_b64 v[0:1], 2, v[1:2] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v5, vcc_lo, s0, v3 v_add_co_ci_u32_e32 v6, vcc_lo, s1, v4, vcc_lo v_add_co_u32 v3, vcc_lo, s2, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo global_load_b32 v5, v[5:6], off global_load_b32 v3, v[3:4], off v_add_co_u32 v0, vcc_lo, s4, v0 
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo s_waitcnt vmcnt(0) v_add_f32_e32 v2, v5, v3 global_store_b32 v[0:1], v2, off .LBB0_4: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z17readOffsetUnroll2PfS_S_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z17readOffsetUnroll2PfS_S_ii, .Lfunc_end0-_Z17readOffsetUnroll2PfS_S_ii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym 
__hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z17readOffsetUnroll2PfS_S_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z17readOffsetUnroll2PfS_S_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void readOffsetUnroll2(float *A, float *B, float *C, const int n, int offset) { unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x; unsigned int k = i + offset; if (k < n) C[i] = A[k] + B[k]; if (k + blockDim.x < n) { C[i + blockDim.x] = A[k + blockDim.x] + B[k + blockDim.x]; } }
.text .file "readOffsetUnroll2.hip" .globl _Z32__device_stub__readOffsetUnroll2PfS_S_ii # -- Begin function _Z32__device_stub__readOffsetUnroll2PfS_S_ii .p2align 4, 0x90 .type _Z32__device_stub__readOffsetUnroll2PfS_S_ii,@function _Z32__device_stub__readOffsetUnroll2PfS_S_ii: # @_Z32__device_stub__readOffsetUnroll2PfS_S_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z17readOffsetUnroll2PfS_S_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z32__device_stub__readOffsetUnroll2PfS_S_ii, .Lfunc_end0-_Z32__device_stub__readOffsetUnroll2PfS_S_ii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z17readOffsetUnroll2PfS_S_ii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, 
.Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z17readOffsetUnroll2PfS_S_ii,@object # @_Z17readOffsetUnroll2PfS_S_ii .section .rodata,"a",@progbits .globl _Z17readOffsetUnroll2PfS_S_ii .p2align 3, 0x0 _Z17readOffsetUnroll2PfS_S_ii: .quad _Z32__device_stub__readOffsetUnroll2PfS_S_ii .size _Z17readOffsetUnroll2PfS_S_ii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z17readOffsetUnroll2PfS_S_ii" .size .L__unnamed_1, 30 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z32__device_stub__readOffsetUnroll2PfS_S_ii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z17readOffsetUnroll2PfS_S_ii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym 
__hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z17readOffsetUnroll2PfS_S_ii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0020*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */ /* 0x000fe20000000800 */ /*0030*/ BSSY B0, 0x160 ; /* 0x0000012000007945 */ /* 0x000fe20003800000 */ /*0040*/ USHF.L.U32 UR4, UR4, 0x1, URZ ; /* 0x0000000104047899 */ /* 0x000fe2000800063f */ /*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e2a0000002100 */ /*0060*/ IMAD R0, R0, UR4, R3 ; /* 0x0000000400007c24 */ /* 0x001fe2000f8e0203 */ /*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc80000000a00 */ /*0080*/ IADD3 R2, R0, c[0x0][0x17c], RZ ; /* 0x00005f0000027a10 */ /* 0x000fc80007ffe0ff */ /*0090*/ ISETP.GE.U32.AND P0, PT, R2.reuse, c[0x0][0x178], PT ; /* 0x00005e0002007a0c */ /* 0x040fe40003f06070 */ /*00a0*/ IADD3 R8, R2, c[0x0][0x0], RZ ; /* 0x0000000002087a10 */ /* 0x000fc80007ffe0ff */ /*00b0*/ ISETP.GE.U32.AND P1, PT, R8, c[0x0][0x178], PT ; /* 0x00005e0008007a0c */ /* 0x000fce0003f26070 */ /*00c0*/ @P0 BRA 0x150 ; /* 0x0000008000000947 */ /* 0x000fea0003800000 */ /*00d0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fd400000001ff */ /*00e0*/ IMAD.WIDE.U32 R4, R2, R7, c[0x0][0x168] ; /* 0x00005a0002047625 */ /* 0x000fc800078e0007 */ /*00f0*/ IMAD.WIDE.U32 R2, R2, R7.reuse, c[0x0][0x160] ; /* 0x0000580002027625 */ /* 0x080fe400078e0007 */ /*0100*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea8000c1e1900 */ /*0110*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea2000c1e1900 */ /*0120*/ IMAD.WIDE.U32 R6, R0, R7, c[0x0][0x170] ; /* 0x00005c0000067625 */ /* 0x000fc800078e0007 */ /*0130*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */ /* 0x004fca0000000000 */ /*0140*/ STG.E [R6.64], R9 ; /* 
0x0000000906007986 */ /* 0x0001e4000c101904 */ /*0150*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0160*/ @P1 EXIT ; /* 0x000000000000194d */ /* 0x000fea0003800000 */ /*0170*/ MOV R7, 0x4 ; /* 0x0000000400077802 */ /* 0x001fca0000000f00 */ /*0180*/ IMAD.WIDE.U32 R2, R8, R7, c[0x0][0x160] ; /* 0x0000580008027625 */ /* 0x000fc800078e0007 */ /*0190*/ IMAD.WIDE.U32 R4, R8, R7, c[0x0][0x168] ; /* 0x00005a0008047625 */ /* 0x000fe400078e0007 */ /*01a0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */ /* 0x000ea8000c1e1900 */ /*01b0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */ /* 0x000ea2000c1e1900 */ /*01c0*/ IADD3 R6, R0, c[0x0][0x0], RZ ; /* 0x0000000000067a10 */ /* 0x000fca0007ffe0ff */ /*01d0*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fc800078e0007 */ /*01e0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */ /* 0x004fca0000000000 */ /*01f0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */ /* 0x000fe2000c101904 */ /*0200*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0210*/ BRA 0x210; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0280*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0290*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ 
..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z17readOffsetUnroll2PfS_S_ii .globl _Z17readOffsetUnroll2PfS_S_ii .p2align 8 .type _Z17readOffsetUnroll2PfS_S_ii,@function _Z17readOffsetUnroll2PfS_S_ii: s_clause 0x1 s_load_b32 s8, s[0:1], 0x2c s_load_b256 s[0:7], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s8, s8, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_mul_i32 s15, s15, s8 v_lshl_add_u32 v0, s15, 1, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v2, s7, v0 s_mov_b32 s7, exec_lo v_cmpx_gt_u32_e64 s6, v2 s_cbranch_execz .LBB0_2 v_mov_b32_e32 v3, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_lshlrev_b64 v[4:5], 2, v[2:3] v_mov_b32_e32 v1, v3 v_add_co_u32 v6, vcc_lo, s0, v4 s_delay_alu instid0(VALU_DEP_3) v_add_co_ci_u32_e32 v7, vcc_lo, s1, v5, vcc_lo v_add_co_u32 v4, vcc_lo, s2, v4 v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo global_load_b32 v6, v[6:7], off global_load_b32 v5, v[4:5], off v_lshlrev_b64 v[3:4], 2, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v3, vcc_lo, s4, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo s_waitcnt vmcnt(0) v_add_f32_e32 v1, v6, v5 global_store_b32 v[3:4], v1, off .LBB0_2: s_or_b32 exec_lo, exec_lo, s7 v_add_nc_u32_e32 v1, s8, v2 s_delay_alu instid0(VALU_DEP_1) v_cmp_gt_u32_e32 vcc_lo, s6, v1 s_and_saveexec_b32 s6, vcc_lo s_cbranch_execz .LBB0_4 v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_lshlrev_b64 v[3:4], 2, v[1:2] v_add_nc_u32_e32 v1, s8, v0 v_lshlrev_b64 v[0:1], 2, v[1:2] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v5, vcc_lo, s0, v3 v_add_co_ci_u32_e32 v6, vcc_lo, s1, v4, vcc_lo v_add_co_u32 v3, vcc_lo, s2, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo global_load_b32 v5, v[5:6], off global_load_b32 v3, v[3:4], off v_add_co_u32 v0, vcc_lo, s4, v0 
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo s_waitcnt vmcnt(0) v_add_f32_e32 v2, v5, v3 global_store_b32 v[0:1], v2, off .LBB0_4: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z17readOffsetUnroll2PfS_S_ii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 288 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 8 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z17readOffsetUnroll2PfS_S_ii, .Lfunc_end0-_Z17readOffsetUnroll2PfS_S_ii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym 
__hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 16 .size: 8 .value_kind: global_buffer - .offset: 24 .size: 4 .value_kind: by_value - .offset: 28 .size: 4 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: hidden_block_count_x - .offset: 36 .size: 4 .value_kind: hidden_block_count_y - .offset: 40 .size: 4 .value_kind: hidden_block_count_z - .offset: 44 .size: 2 .value_kind: hidden_group_size_x - .offset: 46 .size: 2 .value_kind: hidden_group_size_y - .offset: 48 .size: 2 .value_kind: hidden_group_size_z - .offset: 50 .size: 2 .value_kind: hidden_remainder_x - .offset: 52 .size: 2 .value_kind: hidden_remainder_y - .offset: 54 .size: 2 .value_kind: hidden_remainder_z - .offset: 72 .size: 8 .value_kind: hidden_global_offset_x - .offset: 80 .size: 8 .value_kind: hidden_global_offset_y - .offset: 88 .size: 8 .value_kind: hidden_global_offset_z - .offset: 96 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 288 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z17readOffsetUnroll2PfS_S_ii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z17readOffsetUnroll2PfS_S_ii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 8 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00050814_00000000-6_readOffsetUnroll2.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z43__device_stub__Z17readOffsetUnroll2PfS_S_iiPfS_S_ii .type _Z43__device_stub__Z17readOffsetUnroll2PfS_S_iiPfS_S_ii, @function _Z43__device_stub__Z17readOffsetUnroll2PfS_S_iiPfS_S_ii: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) leaq 4(%rsp), %rax movq %rax, 120(%rsp) movq %rsp, %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z17readOffsetUnroll2PfS_S_ii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z43__device_stub__Z17readOffsetUnroll2PfS_S_iiPfS_S_ii, .-_Z43__device_stub__Z17readOffsetUnroll2PfS_S_iiPfS_S_ii .globl _Z17readOffsetUnroll2PfS_S_ii .type 
_Z17readOffsetUnroll2PfS_S_ii, @function _Z17readOffsetUnroll2PfS_S_ii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z43__device_stub__Z17readOffsetUnroll2PfS_S_iiPfS_S_ii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z17readOffsetUnroll2PfS_S_ii, .-_Z17readOffsetUnroll2PfS_S_ii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z17readOffsetUnroll2PfS_S_ii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z17readOffsetUnroll2PfS_S_ii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "readOffsetUnroll2.hip" .globl _Z32__device_stub__readOffsetUnroll2PfS_S_ii # -- Begin function _Z32__device_stub__readOffsetUnroll2PfS_S_ii .p2align 4, 0x90 .type _Z32__device_stub__readOffsetUnroll2PfS_S_ii,@function _Z32__device_stub__readOffsetUnroll2PfS_S_ii: # @_Z32__device_stub__readOffsetUnroll2PfS_S_ii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) movl %ecx, 4(%rsp) movl %r8d, (%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movq %rsp, %rax movq %rax, 112(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z17readOffsetUnroll2PfS_S_ii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z32__device_stub__readOffsetUnroll2PfS_S_ii, .Lfunc_end0-_Z32__device_stub__readOffsetUnroll2PfS_S_ii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z17readOffsetUnroll2PfS_S_ii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, 
.Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z17readOffsetUnroll2PfS_S_ii,@object # @_Z17readOffsetUnroll2PfS_S_ii .section .rodata,"a",@progbits .globl _Z17readOffsetUnroll2PfS_S_ii .p2align 3, 0x0 _Z17readOffsetUnroll2PfS_S_ii: .quad _Z32__device_stub__readOffsetUnroll2PfS_S_ii .size _Z17readOffsetUnroll2PfS_S_ii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z17readOffsetUnroll2PfS_S_ii" .size .L__unnamed_1, 30 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z32__device_stub__readOffsetUnroll2PfS_S_ii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z17readOffsetUnroll2PfS_S_ii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym 
__hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <iostream> #include <string.h> #include <stdio.h> #include <math.h> using namespace std; __global__ void kernel(int* dval, int nword) { int tid = threadIdx.x; int bid = blockIdx.x; int i = blockDim.x*bid + tid; dval[i] = i; } int main( int argc, char** argv) { /* int nb = 65535; // max 65535 int nthre = 512; // max 512 */ int nb = 512; // max 65535 int nthre = 128; // max 512 int nword = nb * nthre; int mem_size = sizeof(int) * nword; printf("# threads: %d \n", nb*nthre); printf("mem_size: %d Kbyte\n", mem_size >> 10); int* hval = (int*) malloc(mem_size); int* dval; cudaMalloc( (void**) &dval, mem_size); dim3 grid(nb); dim3 threads(nthre); kernel<<< grid, threads >>>(dval, nword); cudaMemcpy(hval, dval, mem_size, cudaMemcpyDeviceToHost); for(int i=0; i<nword; i++){ int z = hval[i]; if(i != z){ printf("%d, %d\n", i, z); } } free(hval); cudaFree(dval); return (0); }
code for sm_80 Function : _Z6kernelPii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e220000002100 */ /*0020*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e240000002500 */ /*0050*/ IMAD R5, R0, c[0x0][0x0], R5 ; /* 0x0000000000057a24 */ /* 0x001fca00078e0205 */ /*0060*/ IMAD.WIDE R2, R5, R2, c[0x0][0x160] ; /* 0x0000580005027625 */ /* 0x000fca00078e0202 */ /*0070*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*0080*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0090*/ BRA 0x90; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <iostream> #include <string.h> #include <stdio.h> #include <math.h> using namespace std; __global__ void kernel(int* dval, int nword) { int tid = threadIdx.x; int bid = blockIdx.x; int i = blockDim.x*bid + tid; dval[i] = i; } int main( int argc, char** argv) { /* int nb = 65535; // max 65535 int nthre = 512; // max 512 */ int nb = 512; // max 65535 int nthre = 128; // max 512 int nword = nb * nthre; int mem_size = sizeof(int) * nword; printf("# threads: %d \n", nb*nthre); printf("mem_size: %d Kbyte\n", mem_size >> 10); int* hval = (int*) malloc(mem_size); int* dval; cudaMalloc( (void**) &dval, mem_size); dim3 grid(nb); dim3 threads(nthre); kernel<<< grid, threads >>>(dval, nword); cudaMemcpy(hval, dval, mem_size, cudaMemcpyDeviceToHost); for(int i=0; i<nword; i++){ int z = hval[i]; if(i != z){ printf("%d, %d\n", i, z); } } free(hval); cudaFree(dval); return (0); }
.file "tmpxft_00018ad6_00000000-6_test.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3672: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3672: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z26__device_stub__Z6kernelPiiPii .type _Z26__device_stub__Z6kernelPiiPii, @function _Z26__device_stub__Z6kernelPiiPii: .LFB3694: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z6kernelPii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE3694: .size _Z26__device_stub__Z6kernelPiiPii, .-_Z26__device_stub__Z6kernelPiiPii .globl _Z6kernelPii .type _Z6kernelPii, @function _Z6kernelPii: .LFB3695: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z26__device_stub__Z6kernelPiiPii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3695: .size _Z6kernelPii, .-_Z6kernelPii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "# threads: %d \n" .LC1: 
.string "mem_size: %d Kbyte\n" .LC2: .string "%d, %d\n" .text .globl main .type main, @function main: .LFB3669: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $48, %rsp .cfi_def_cfa_offset 80 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movl $65536, %edx leaq .LC0(%rip), %rsi movl $2, %edi call __printf_chk@PLT movl $256, %edx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $262144, %edi call malloc@PLT movq %rax, %rbp leaq 8(%rsp), %rdi movl $262144, %esi call cudaMalloc@PLT movl $512, 16(%rsp) movl $1, 20(%rsp) movl $128, 28(%rsp) movl $1, 32(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L18 .L12: movl $2, %ecx movl $262144, %edx movq 8(%rsp), %rsi movq %rbp, %rdi call cudaMemcpy@PLT movl $0, %ebx leaq .LC2(%rip), %r12 jmp .L14 .L18: movl $65536, %esi movq 8(%rsp), %rdi call _Z26__device_stub__Z6kernelPiiPii jmp .L12 .L13: addq $1, %rbx cmpq $65536, %rbx je .L19 .L14: movl 0(%rbp,%rbx,4), %ecx cmpl %ebx, %ecx je .L13 movl %ebx, %edx movq %r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L13 .L19: movq %rbp, %rdi call free@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L20 movl $0, %eax addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L20: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE3669: .size main, .-main .section .rodata.str1.1 .LC3: .string "_Z6kernelPii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3697: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi 
movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z6kernelPii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3697: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <iostream> #include <string.h> #include <stdio.h> #include <math.h> using namespace std; __global__ void kernel(int* dval, int nword) { int tid = threadIdx.x; int bid = blockIdx.x; int i = blockDim.x*bid + tid; dval[i] = i; } int main( int argc, char** argv) { /* int nb = 65535; // max 65535 int nthre = 512; // max 512 */ int nb = 512; // max 65535 int nthre = 128; // max 512 int nword = nb * nthre; int mem_size = sizeof(int) * nword; printf("# threads: %d \n", nb*nthre); printf("mem_size: %d Kbyte\n", mem_size >> 10); int* hval = (int*) malloc(mem_size); int* dval; cudaMalloc( (void**) &dval, mem_size); dim3 grid(nb); dim3 threads(nthre); kernel<<< grid, threads >>>(dval, nword); cudaMemcpy(hval, dval, mem_size, cudaMemcpyDeviceToHost); for(int i=0; i<nword; i++){ int z = hval[i]; if(i != z){ printf("%d, %d\n", i, z); } } free(hval); cudaFree(dval); return (0); }
#include <hip/hip_runtime.h> #include <iostream> #include <string.h> #include <stdio.h> #include <math.h> using namespace std; __global__ void kernel(int* dval, int nword) { int tid = threadIdx.x; int bid = blockIdx.x; int i = blockDim.x*bid + tid; dval[i] = i; } int main( int argc, char** argv) { /* int nb = 65535; // max 65535 int nthre = 512; // max 512 */ int nb = 512; // max 65535 int nthre = 128; // max 512 int nword = nb * nthre; int mem_size = sizeof(int) * nword; printf("# threads: %d \n", nb*nthre); printf("mem_size: %d Kbyte\n", mem_size >> 10); int* hval = (int*) malloc(mem_size); int* dval; hipMalloc( (void**) &dval, mem_size); dim3 grid(nb); dim3 threads(nthre); kernel<<< grid, threads >>>(dval, nword); hipMemcpy(hval, dval, mem_size, hipMemcpyDeviceToHost); for(int i=0; i<nword; i++){ int z = hval[i]; if(i != z){ printf("%d, %d\n", i, z); } } free(hval); hipFree(dval); return (0); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <iostream> #include <string.h> #include <stdio.h> #include <math.h> using namespace std; __global__ void kernel(int* dval, int nword) { int tid = threadIdx.x; int bid = blockIdx.x; int i = blockDim.x*bid + tid; dval[i] = i; } int main( int argc, char** argv) { /* int nb = 65535; // max 65535 int nthre = 512; // max 512 */ int nb = 512; // max 65535 int nthre = 128; // max 512 int nword = nb * nthre; int mem_size = sizeof(int) * nword; printf("# threads: %d \n", nb*nthre); printf("mem_size: %d Kbyte\n", mem_size >> 10); int* hval = (int*) malloc(mem_size); int* dval; hipMalloc( (void**) &dval, mem_size); dim3 grid(nb); dim3 threads(nthre); kernel<<< grid, threads >>>(dval, nword); hipMemcpy(hval, dval, mem_size, hipMemcpyDeviceToHost); for(int i=0; i<nword; i++){ int z = hval[i]; if(i != z){ printf("%d, %d\n", i, z); } } free(hval); hipFree(dval); return (0); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6kernelPii .globl _Z6kernelPii .p2align 8 .type _Z6kernelPii,@function _Z6kernelPii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x1c s_load_b64 s[0:1], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[1:2] v_add_co_u32 v2, vcc_lo, s0, v2 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo global_store_b32 v[2:3], v1, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6kernelPii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text 
.Lfunc_end0: .size _Z6kernelPii, .Lfunc_end0-_Z6kernelPii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6kernelPii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z6kernelPii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <iostream> #include <string.h> #include <stdio.h> #include <math.h> using namespace std; __global__ void kernel(int* dval, int nword) { int tid = threadIdx.x; int bid = blockIdx.x; int i = blockDim.x*bid + tid; dval[i] = i; } int main( int argc, char** argv) { /* int nb = 65535; // max 65535 int nthre = 512; // max 512 */ int nb = 512; // max 65535 int nthre = 128; // max 512 int nword = nb * nthre; int mem_size = sizeof(int) * nword; printf("# threads: %d \n", nb*nthre); printf("mem_size: %d Kbyte\n", mem_size >> 10); int* hval = (int*) malloc(mem_size); int* dval; hipMalloc( (void**) &dval, mem_size); dim3 grid(nb); dim3 threads(nthre); kernel<<< grid, threads >>>(dval, nword); hipMemcpy(hval, dval, mem_size, hipMemcpyDeviceToHost); for(int i=0; i<nword; i++){ int z = hval[i]; if(i != z){ printf("%d, %d\n", i, z); } } free(hval); hipFree(dval); return (0); }
.text .file "test.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z21__device_stub__kernelPii # -- Begin function _Z21__device_stub__kernelPii .p2align 4, 0x90 .type _Z21__device_stub__kernelPii,@function _Z21__device_stub__kernelPii: # @_Z21__device_stub__kernelPii .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z6kernelPii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z21__device_stub__kernelPii, .Lfunc_end0-_Z21__device_stub__kernelPii .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $104, %rsp .cfi_def_cfa_offset 128 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movl $.L.str, %edi movl $65536, %esi # imm = 0x10000 xorl %eax, %eax callq printf movl $.L.str.1, %edi movl $256, %esi # imm = 0x100 xorl %eax, %eax callq printf movl $262144, %edi # imm = 0x40000 callq malloc movq %rax, %rbx leaq 8(%rsp), %rdi movl $262144, %esi # imm = 0x40000 callq hipMalloc movabsq $4294967424, %rdx # imm = 0x100000080 leaq 384(%rdx), %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq 8(%rsp), %rax movq %rax, 72(%rsp) movl $65536, 20(%rsp) # imm = 0x10000 leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 20(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), 
%rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z6kernelPii, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: movq 8(%rsp), %rsi movl $262144, %edx # imm = 0x40000 movq %rbx, %rdi movl $2, %ecx callq hipMemcpy xorl %r14d, %r14d jmp .LBB1_3 .p2align 4, 0x90 .LBB1_5: # in Loop: Header=BB1_3 Depth=1 incq %r14 cmpq $65536, %r14 # imm = 0x10000 je .LBB1_6 .LBB1_3: # =>This Inner Loop Header: Depth=1 movl (%rbx,%r14,4), %edx cmpq %rdx, %r14 je .LBB1_5 # %bb.4: # in Loop: Header=BB1_3 Depth=1 movl $.L.str.2, %edi movl %r14d, %esi # kill: def $edx killed $edx killed $rdx xorl %eax, %eax callq printf jmp .LBB1_5 .LBB1_6: movq %rbx, %rdi callq free movq 8(%rsp), %rdi callq hipFree xorl %eax, %eax addq $104, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6kernelPii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # 
@__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z6kernelPii,@object # @_Z6kernelPii .section .rodata,"a",@progbits .globl _Z6kernelPii .p2align 3, 0x0 _Z6kernelPii: .quad _Z21__device_stub__kernelPii .size _Z6kernelPii, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "# threads: %d \n" .size .L.str, 18 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "mem_size: %d Kbyte\n" .size .L.str.1, 23 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "%d, %d\n" .size .L.str.2, 8 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z6kernelPii" .size .L__unnamed_1, 13 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__kernelPii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6kernelPii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z6kernelPii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000e220000002100 */ /*0020*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e240000002500 */ /*0050*/ IMAD R5, R0, c[0x0][0x0], R5 ; /* 0x0000000000057a24 */ /* 0x001fca00078e0205 */ /*0060*/ IMAD.WIDE R2, R5, R2, c[0x0][0x160] ; /* 0x0000580005027625 */ /* 0x000fca00078e0202 */ /*0070*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*0080*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0090*/ BRA 0x90; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6kernelPii .globl _Z6kernelPii .p2align 8 .type _Z6kernelPii,@function _Z6kernelPii: s_clause 0x1 s_load_b32 s2, s[0:1], 0x1c s_load_b64 s[0:1], s[0:1], 0x0 s_waitcnt lgkmcnt(0) s_and_b32 s2, s2, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1] v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[2:3], 2, v[1:2] v_add_co_u32 v2, vcc_lo, s0, v2 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo global_store_b32 v[2:3], v1, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6kernelPii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text 
.Lfunc_end0: .size _Z6kernelPii, .Lfunc_end0-_Z6kernelPii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6kernelPii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z6kernelPii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00018ad6_00000000-6_test.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3672: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3672: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z26__device_stub__Z6kernelPiiPii .type _Z26__device_stub__Z6kernelPiiPii, @function _Z26__device_stub__Z6kernelPiiPii: .LFB3694: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movl %esi, 4(%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z6kernelPii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE3694: .size _Z26__device_stub__Z6kernelPiiPii, .-_Z26__device_stub__Z6kernelPiiPii .globl _Z6kernelPii .type _Z6kernelPii, @function _Z6kernelPii: .LFB3695: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z26__device_stub__Z6kernelPiiPii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3695: .size _Z6kernelPii, .-_Z6kernelPii .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "# threads: %d \n" .LC1: 
.string "mem_size: %d Kbyte\n" .LC2: .string "%d, %d\n" .text .globl main .type main, @function main: .LFB3669: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $48, %rsp .cfi_def_cfa_offset 80 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax movl $65536, %edx leaq .LC0(%rip), %rsi movl $2, %edi call __printf_chk@PLT movl $256, %edx leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $262144, %edi call malloc@PLT movq %rax, %rbp leaq 8(%rsp), %rdi movl $262144, %esi call cudaMalloc@PLT movl $512, 16(%rsp) movl $1, 20(%rsp) movl $128, 28(%rsp) movl $1, 32(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L18 .L12: movl $2, %ecx movl $262144, %edx movq 8(%rsp), %rsi movq %rbp, %rdi call cudaMemcpy@PLT movl $0, %ebx leaq .LC2(%rip), %r12 jmp .L14 .L18: movl $65536, %esi movq 8(%rsp), %rdi call _Z26__device_stub__Z6kernelPiiPii jmp .L12 .L13: addq $1, %rbx cmpq $65536, %rbx je .L19 .L14: movl 0(%rbp,%rbx,4), %ecx cmpl %ebx, %ecx je .L13 movl %ebx, %edx movq %r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L13 .L19: movq %rbp, %rdi call free@PLT movq 8(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L20 movl $0, %eax addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L20: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE3669: .size main, .-main .section .rodata.str1.1 .LC3: .string "_Z6kernelPii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3697: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi 
movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC3(%rip), %rdx movq %rdx, %rcx leaq _Z6kernelPii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3697: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "test.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z21__device_stub__kernelPii # -- Begin function _Z21__device_stub__kernelPii .p2align 4, 0x90 .type _Z21__device_stub__kernelPii,@function _Z21__device_stub__kernelPii: # @_Z21__device_stub__kernelPii .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movl %esi, 4(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 4(%rsp), %rax movq %rax, 72(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z6kernelPii, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z21__device_stub__kernelPii, .Lfunc_end0-_Z21__device_stub__kernelPii .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %r14 .cfi_def_cfa_offset 16 pushq %rbx .cfi_def_cfa_offset 24 subq $104, %rsp .cfi_def_cfa_offset 128 .cfi_offset %rbx, -24 .cfi_offset %r14, -16 movl $.L.str, %edi movl $65536, %esi # imm = 0x10000 xorl %eax, %eax callq printf movl $.L.str.1, %edi movl $256, %esi # imm = 0x100 xorl %eax, %eax callq printf movl $262144, %edi # imm = 0x40000 callq malloc movq %rax, %rbx leaq 8(%rsp), %rdi movl $262144, %esi # imm = 0x40000 callq hipMalloc movabsq $4294967424, %rdx # imm = 0x100000080 leaq 384(%rdx), %rdi movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq 8(%rsp), %rax movq %rax, 72(%rsp) movl $65536, 20(%rsp) # imm = 0x10000 leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 20(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rdi leaq 40(%rsp), %rsi leaq 32(%rsp), 
%rdx leaq 24(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 40(%rsp), %rcx movl 48(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z6kernelPii, %edi pushq 24(%rsp) .cfi_adjust_cfa_offset 8 pushq 40(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: movq 8(%rsp), %rsi movl $262144, %edx # imm = 0x40000 movq %rbx, %rdi movl $2, %ecx callq hipMemcpy xorl %r14d, %r14d jmp .LBB1_3 .p2align 4, 0x90 .LBB1_5: # in Loop: Header=BB1_3 Depth=1 incq %r14 cmpq $65536, %r14 # imm = 0x10000 je .LBB1_6 .LBB1_3: # =>This Inner Loop Header: Depth=1 movl (%rbx,%r14,4), %edx cmpq %rdx, %r14 je .LBB1_5 # %bb.4: # in Loop: Header=BB1_3 Depth=1 movl $.L.str.2, %edi movl %r14d, %esi # kill: def $edx killed $edx killed $rdx xorl %eax, %eax callq printf jmp .LBB1_5 .LBB1_6: movq %rbx, %rdi callq free movq 8(%rsp), %rdi callq hipFree xorl %eax, %eax addq $104, %rsp .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6kernelPii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # 
@__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z6kernelPii,@object # @_Z6kernelPii .section .rodata,"a",@progbits .globl _Z6kernelPii .p2align 3, 0x0 _Z6kernelPii: .quad _Z21__device_stub__kernelPii .size _Z6kernelPii, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "# threads: %d \n" .size .L.str, 18 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "mem_size: %d Kbyte\n" .size .L.str.1, 23 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "%d, %d\n" .size .L.str.2, 8 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z6kernelPii" .size .L__unnamed_1, 13 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__kernelPii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6kernelPii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sequence.h> #include <thrust/functional.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/reduce.h> #include <chrono> #include <algorithm> #include <vector> //using sys_clock = std::chrono::system_clock; int thrust_sequence() { thrust::device_vector<int> D_vec(10,1); thrust::fill(D_vec.begin(), D_vec.begin()+7, 9); thrust::host_vector<int> H_vec(D_vec.begin(),D_vec.begin()+5); thrust::sequence(H_vec.begin(), H_vec.end(), 5, 2); thrust::copy(H_vec.begin(), H_vec.end(), D_vec.begin()); int i = 0; for(auto value : D_vec) std::cout << "D[" << i++ << "]= " << value << std::endl; } int thrust_sort() { int current_h = 0, current_d = 0, exit = 0, limit = 1 << 24; std::chrono::time_point<std::chrono::system_clock> t1, t2; std::chrono::duration<double, std::milli> exec_time_ms; thrust::host_vector<int> h_vec(limit); thrust::generate(h_vec.begin(), h_vec.end(), rand); thrust::device_vector<int> d_vec = h_vec; t1 = std::chrono::system_clock::now(); thrust::sort(d_vec.begin(), d_vec.end()); t2 = std::chrono::system_clock::now(); exec_time_ms = t2 - t1; std::cout << "thrust gpu sort: " << exec_time_ms.count() << "ms." << std::endl; std::vector<int> stl_vec(h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), stl_vec.begin()); t1 = std::chrono::system_clock::now(); std::sort(stl_vec.begin(), stl_vec.end()); t2 = std::chrono::system_clock::now(); exec_time_ms = t2 - t1; std::cout << "stl sort: " << exec_time_ms.count() << "ms." 
<< std::endl; } struct functor { const float a; functor(float _a) : a(_a) {} __host__ __device__ float operator()(const float &x, const float &y) const { return a * x + y; } }; int operador() { const float A = 5; const int size = 10; thrust::host_vector<float> X(size), Y(size), Z(size); thrust::sequence(X.begin(), X.end(), 10, 10); thrust::sequence(Y.begin(), Y.end(), 1, 5); thrust::transform(X.begin(), X.end(), Y.begin(), Y.begin(), functor(A)); for(int i = 0; i < Y.size(); i++) { std::cout << "Y[" << i << "] = " << Y[i] << std::endl; } } template <typename T> struct square { __host__ __device__ float operator()(const T &x) const { return x*x; } }; int main () { float x[4] = {1.0, 2.0, 3.0, 4.0}; thrust::device_vector<float> d_vec(x, x+4); square<float> unary_op; thrust::plus<float> binary_op; float norm = std::sqrt( thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0, binary_op) ); std::cout << norm << std::endl; }
#include <hip/hip_runtime.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sequence.h> #include <thrust/functional.h> #include <thrust/sort.h> #include <thrust/transform.h> #include <thrust/reduce.h> #include <chrono> #include <algorithm> #include <vector> //using sys_clock = std::chrono::system_clock; int thrust_sequence() { thrust::device_vector<int> D_vec(10,1); thrust::fill(D_vec.begin(), D_vec.begin()+7, 9); thrust::host_vector<int> H_vec(D_vec.begin(),D_vec.begin()+5); thrust::sequence(H_vec.begin(), H_vec.end(), 5, 2); thrust::copy(H_vec.begin(), H_vec.end(), D_vec.begin()); int i = 0; for(auto value : D_vec) std::cout << "D[" << i++ << "]= " << value << std::endl; } int thrust_sort() { int current_h = 0, current_d = 0, exit = 0, limit = 1 << 24; std::chrono::time_point<std::chrono::system_clock> t1, t2; std::chrono::duration<double, std::milli> exec_time_ms; thrust::host_vector<int> h_vec(limit); thrust::generate(h_vec.begin(), h_vec.end(), rand); thrust::device_vector<int> d_vec = h_vec; t1 = std::chrono::system_clock::now(); thrust::sort(d_vec.begin(), d_vec.end()); t2 = std::chrono::system_clock::now(); exec_time_ms = t2 - t1; std::cout << "thrust gpu sort: " << exec_time_ms.count() << "ms." << std::endl; std::vector<int> stl_vec(h_vec.size()); thrust::copy(h_vec.begin(), h_vec.end(), stl_vec.begin()); t1 = std::chrono::system_clock::now(); std::sort(stl_vec.begin(), stl_vec.end()); t2 = std::chrono::system_clock::now(); exec_time_ms = t2 - t1; std::cout << "stl sort: " << exec_time_ms.count() << "ms." 
<< std::endl; } struct functor { const float a; functor(float _a) : a(_a) {} __host__ __device__ float operator()(const float &x, const float &y) const { return a * x + y; } }; int operador() { const float A = 5; const int size = 10; thrust::host_vector<float> X(size), Y(size), Z(size); thrust::sequence(X.begin(), X.end(), 10, 10); thrust::sequence(Y.begin(), Y.end(), 1, 5); thrust::transform(X.begin(), X.end(), Y.begin(), Y.begin(), functor(A)); for(int i = 0; i < Y.size(); i++) { std::cout << "Y[" << i << "] = " << Y[i] << std::endl; } } template <typename T> struct square { __host__ __device__ float operator()(const T &x) const { return x*x; } }; int main () { float x[4] = {1.0, 2.0, 3.0, 4.0}; thrust::device_vector<float> d_vec(x, x+4); square<float> unary_op; thrust::plus<float> binary_op; float norm = std::sqrt( thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0, binary_op) ); std::cout << norm << std::endl; }
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
//Based on the work of Andrew Krepps #include <stdio.h> #include <stdlib.h> //srand and rand #include <math.h> // Constant data declaration #define WORKSIZE 1024 // define a default worksize for constant data __device__ __constant__ int d_a_const[WORKSIZE]; __device__ __constant__ int d_b_const[WORKSIZE]; /* Profile functions. Taken and modified from https://devblogs.nvidia.com/how-optimize-data-transfers-cuda-cc/ */ void profileCopiesHostToDevice(int *d_a, int *h_a, int *d_b, int *h_b, const unsigned int bytes, const char *desc){ // events for timing cudaEvent_t startEvent, stopEvent; cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); //start a recording event and execute the transfer afte cudaEventRecord(startEvent, 0); // Use either cudaMemcpy or cudaMemcpyToSymbol depending on shared vs constant memory if(strcmp(desc, "Shared") == 0){ cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice); cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice); }else if(strcmp(desc, "Constant") == 0){ cudaMemcpyToSymbol( d_a_const, h_a, bytes,0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol( d_b_const, h_b, bytes,0, cudaMemcpyHostToDevice); } cudaEventRecord(stopEvent, 0); //stop cudaEventSynchronize(stopEvent); float time; cudaEventElapsedTime(&time, startEvent, stopEvent); printf("\nTransfers Host to Device Time Elaped: %f ms, Bandwidth (MB/s): %f\n\n", time, bytes * 1e-3 / time); // clean up events cudaEventDestroy(startEvent); cudaEventDestroy(stopEvent); } void profileCopiesDeviceToHost( int *h_c_add, int *d_c_add, int *h_c_sub, int *d_c_sub, int *h_c_mult, int *d_c_mult, int *h_c_mod, int *d_c_mod, const unsigned int bytes, const char *desc){ // events for timing cudaEvent_t startEvent, stopEvent; cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); cudaEventRecord(startEvent, 0); if(strcmp(desc,"Shared") == 0){ cudaMemcpy( h_c_add, d_c_add, bytes, cudaMemcpyDeviceToHost); cudaMemcpy( h_c_sub, d_c_sub, bytes, cudaMemcpyDeviceToHost); cudaMemcpy( h_c_mult, 
d_c_mult, bytes, cudaMemcpyDeviceToHost); cudaMemcpy( h_c_mod, d_c_mod, bytes, cudaMemcpyDeviceToHost); }else if(strcmp(desc,"Constant") == 0){ cudaMemcpy( h_c_add, d_c_add, bytes, cudaMemcpyDeviceToHost); cudaMemcpy( h_c_sub, d_c_sub, bytes, cudaMemcpyDeviceToHost); cudaMemcpy( h_c_mult, d_c_mult, bytes, cudaMemcpyDeviceToHost); cudaMemcpy( h_c_mod, d_c_mod, bytes, cudaMemcpyDeviceToHost); } cudaEventRecord(stopEvent, 0); cudaEventSynchronize(stopEvent); float time; cudaEventElapsedTime(&time, startEvent, stopEvent); printf("\n%s transfers Device To Host Time Elaped: %f ms, Bandwidth (MB/s): %f\n\n",desc,time, bytes * 1e-3 / time); // clean up events cudaEventDestroy(startEvent); cudaEventDestroy(stopEvent); } /* Arithmetic Functions Using shared Memory */ // Add Function __global__ void add_shared(int *a, int *b, int *c, int n){ extern __shared__ int res[]; // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) res[threadIdx.x] = a[id] + b[id]; __syncthreads(); // wait for all threads in the block to finish c[threadIdx.x] = res[threadIdx.x];//since threads from different blocks cannot talk, use thread index instead } // subtract function __global__ void subtract_shared(int *a, int *b, int *c, int n){ extern __shared__ int res[]; // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) res[threadIdx.x] = a[id] - b[id]; __syncthreads(); // wait for all threads in the block to finish c[threadIdx.x] = res[threadIdx.x]; } // multiply function __global__ void mult_shared(int *a, int *b, int *c, int n){ extern __shared__ int res[]; // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) res[threadIdx.x] = a[id] * b[id]; __syncthreads(); // wait for all threads in the block to finish c[threadIdx.x] = res[threadIdx.x]; } // Moudulus function __global__ void mod_shared(int 
*a, int *b, int *c, int n){ extern __shared__ int res[]; // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) res[threadIdx.x] = a[id] % b[id]; __syncthreads(); // wait for all threads in the block to finish c[threadIdx.x] = res[threadIdx.x]; } /* Arithmetic Functions Using Constant Memory */ // Add Function __global__ void add_const( int *c, int n){ // Get our global thread ID const unsigned int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds // use the constant data declared if (id < n) c[id] = d_a_const[id] + d_b_const[id]; } // subtract function __global__ void subtract_const(int *c, int n){ // Get our global thread ID const unsigned int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) c[id] = d_a_const[id] - d_b_const[id]; } // multiply function __global__ void mult_const(int *c, int n){ // Get our global thread ID const unsigned int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) c[id] = d_a_const[id] * d_b_const[id]; } // Moudulus function __global__ void mod_const(int *c, int n){ // Get our global thread ID const unsigned int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) c[id] = d_a_const[id] % d_b_const[id]; } /* Function calls to arithmetic functions using shared memory and timing */ void perform_add_shared(int numBlocks, int totalThreads, int *d_a, int *d_b, int *d_c_add){ float time; // events for timing cudaEvent_t startEvent, stopEvent; cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); //start a recording event and execute the Kernels after cudaEventRecord(startEvent, 0); //performing add function printf(" Performing Add function..."); add_shared<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_a, d_b, d_c_add, totalThreads); cudaDeviceSynchronize(); cudaEventRecord(stopEvent, 0); //stop 
cudaEventSynchronize(stopEvent); cudaEventElapsedTime(&time, startEvent, stopEvent); printf(" Elapsed Time: %f\n", time); // clean up events cudaEventDestroy(startEvent); cudaEventDestroy(stopEvent); } void perform_sub_shared(int numBlocks, int totalThreads, int *d_a,int *d_b,int *d_c_sub){ float time; // events for timing cudaEvent_t startEvent, stopEvent; cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); //start a recording event and execute the Kernels after cudaEventRecord(startEvent, 0); //performing subtract function printf(" Performing subtract function"); subtract_shared<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_a, d_b, d_c_sub, totalThreads); cudaDeviceSynchronize(); cudaEventRecord(stopEvent, 0); //stop cudaEventSynchronize(stopEvent); cudaEventElapsedTime(&time, startEvent, stopEvent); printf(" Elapsed Time: %f\n", time); // clean up events cudaEventDestroy(startEvent); cudaEventDestroy(stopEvent); } void perform_mult_shared(int numBlocks, int totalThreads, int *d_a,int *d_b,int *d_c_mult){ float time; // events for timing cudaEvent_t startEvent, stopEvent; cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); //start a recording event and execute the Kernels after cudaEventRecord(startEvent, 0); //performing mult function printf(" Performing mult function"); mult_shared<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_a, d_b, d_c_mult, totalThreads); cudaDeviceSynchronize(); cudaEventRecord(stopEvent, 0); //stop cudaEventSynchronize(stopEvent); cudaEventElapsedTime(&time, startEvent, stopEvent); printf(" Elapsed Time: %f\n", time); // clean up events cudaEventDestroy(startEvent); cudaEventDestroy(stopEvent); } void perform_mod_shared(int numBlocks, int totalThreads, int *d_a,int *d_b,int *d_c_mod){ float time; // events for timing cudaEvent_t startEvent, stopEvent; cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); //start a recording event and execute the Kernels after cudaEventRecord(startEvent, 0); 
//performing mod fuction printf(" Performing mod function"); mod_shared<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_a, d_b, d_c_mod, totalThreads); cudaDeviceSynchronize(); cudaEventRecord(stopEvent, 0); //stop cudaEventSynchronize(stopEvent); cudaEventElapsedTime(&time, startEvent, stopEvent); printf(" Elapsed Time: %f\n", time); // clean up events cudaEventDestroy(startEvent); cudaEventDestroy(stopEvent); } // Shared Memory Implementation function void execute_arithmetic_sharedMem(int totalThreads, int numBlocks){ printf("\t\t*****Executing Arithmetic Functions Using Shared Memory*****\n"); // Host input vectors int *h_a, *h_b; //Host output vectors for different functions "h_c_func" int *h_c_add,*h_c_sub,*h_c_mult,*h_c_mod; // Device input vectors int *d_a, *d_b; //Device output vector int *d_c_add,*d_c_sub,*d_c_mult,*d_c_mod; // Size, in bytes, of each vector const unsigned int bytes = totalThreads*sizeof(int); // Allocate memory for each vector on host Pinned cudaMallocHost((void**)&h_a, bytes); cudaMallocHost((void**)&h_b, bytes); cudaMallocHost((void**)&h_c_add, bytes); cudaMallocHost((void**)&h_c_sub, bytes); cudaMallocHost((void**)&h_c_mult, bytes); cudaMallocHost((void**)&h_c_mod, bytes); // Allocate memory for each vector on GPU cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c_add, bytes); cudaMalloc(&d_c_sub, bytes); cudaMalloc(&d_c_mult, bytes); cudaMalloc(&d_c_mod, bytes); //initialize the input vectors for(int i = 0;i<totalThreads;i++){ //first array is 0 through number of threads h_a[i] = i; // second array is a random number between 0 and 3 h_b[i] = rand() % 4; } //printf the first 7 elements of input arrays printf("Array 1: "); for(int i = 0; i<7; i++){ printf("%d ", h_a[i]); } printf("\nArray 2: "); for(int i = 0; i<7; i++){ printf("%d ", h_b[i]); } printf("\n\n"); //copy both input arrays from host to device and profile it (see profileCopiesHostToDevice) profileCopiesHostToDevice(d_a, h_a, d_b, h_b, bytes, 
"Shared"); //Perform arithmetic functions perform_add_shared(numBlocks, totalThreads, d_a, d_b, d_c_add); perform_sub_shared(numBlocks, totalThreads, d_a, d_b, d_c_sub); perform_mult_shared(numBlocks, totalThreads, d_a, d_b, d_c_mult); perform_mod_shared(numBlocks, totalThreads, d_a, d_b, d_c_mod); //copy the output arrays from device to host profileCopiesDeviceToHost(h_c_add,d_c_add,h_c_sub, d_c_sub,h_c_mult, d_c_mult,h_c_mod, d_c_mod, bytes,"Shared"); // printf the first 7 elements of the results printf("Arithmetic Results: \n"); printf("Add: "); for(int i = 0; i<7; i++){ printf("%d ", h_c_add[i]); } printf("\nSubtract: "); for(int i = 0; i<7; i++){ printf("%d ", h_c_sub[i]); } printf("\nMultiply: "); for(int i = 0; i<7; i++){ printf("%d ", h_c_mult[i]); } printf("\nMultiply: "); for(int i = 0; i<7; i++){ printf("%d ", h_c_mod[i]); } printf("\n\n"); //free up space on our GPU cudaFree(d_a); cudaFree(d_b); cudaFree(d_c_add); cudaFree(d_c_sub); cudaFree(d_c_mult); cudaFree(d_c_add); //free up space on our CPU use cudaFreeHost since pinnned cudaFreeHost(h_a); cudaFreeHost(h_b); cudaFreeHost(h_c_add); cudaFreeHost(h_c_sub); cudaFreeHost(h_c_mult); cudaFreeHost(h_c_mod); } /* Function calls to arithmetic functions using constant memory */ void perform_add_const(int numBlocks, int totalThreads,int *d_c_add){ float time; // events for timing cudaEvent_t startEvent, stopEvent; cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); //start a recording event and execute the Kernels after cudaEventRecord(startEvent, 0); //performing add function printf(" Performing Add function..."); add_const<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_c_add, totalThreads); cudaDeviceSynchronize(); cudaEventRecord(stopEvent, 0); //stop cudaEventSynchronize(stopEvent); cudaEventElapsedTime(&time, startEvent, stopEvent); printf(" Elapsed Time: %f\n", time); // clean up events cudaEventDestroy(startEvent); cudaEventDestroy(stopEvent); } void perform_sub_const(int numBlocks, 
int totalThreads, int *d_c_sub){ float time; // events for timing cudaEvent_t startEvent, stopEvent; cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); //start a recording event and execute the Kernels after cudaEventRecord(startEvent, 0); //performing subtract function printf(" Performing subtract function"); subtract_const<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_c_sub, totalThreads); cudaDeviceSynchronize(); cudaEventRecord(stopEvent, 0); //stop cudaEventSynchronize(stopEvent); cudaEventElapsedTime(&time, startEvent, stopEvent); printf(" Elapsed Time: %f\n", time); // clean up events cudaEventDestroy(startEvent); cudaEventDestroy(stopEvent); } void perform_mult_const(int numBlocks, int totalThreads,int *d_c_mult){ float time; // events for timing cudaEvent_t startEvent, stopEvent; cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); //start a recording event and execute the Kernels after cudaEventRecord(startEvent, 0); //performing mult function printf(" Performing mult function"); mult_const<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_c_mult, totalThreads); cudaDeviceSynchronize(); cudaEventRecord(stopEvent, 0); //stop cudaEventSynchronize(stopEvent); cudaEventElapsedTime(&time, startEvent, stopEvent); printf(" Elapsed Time: %f\n", time); // clean up events cudaEventDestroy(startEvent); cudaEventDestroy(stopEvent); } void perform_mod_const(int numBlocks, int totalThreads, int *d_c_mod){ float time; // events for timing cudaEvent_t startEvent, stopEvent; cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); //start a recording event and execute the Kernels after cudaEventRecord(startEvent, 0); //performing mod fuction printf(" Performing mod function"); mod_const<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>( d_c_mod, totalThreads); cudaDeviceSynchronize(); cudaEventRecord(stopEvent, 0); //stop cudaEventSynchronize(stopEvent); cudaEventElapsedTime(&time, startEvent, stopEvent); printf(" Elapsed Time: 
%f\n", time); // clean up events cudaEventDestroy(startEvent); cudaEventDestroy(stopEvent); } // Constant Memory Implementation void execute_arithmetic_constMem(int totalThreads, int numBlocks){ printf("\t\t*****Executing Arithmetic Functions Using Constant Memory*****\n"); // Host input vectors int *h_a, *h_b; //Host output vectors for different functions "h_c_func" int *h_c_add,*h_c_sub,*h_c_mult,*h_c_mod; // Device input vectors int *d_a, *d_b; //Device output vector int *d_c_add,*d_c_sub,*d_c_mult,*d_c_mod; // Size, in bytes, of each vector const unsigned int bytes = totalThreads*sizeof(int); // Allocate memory for each vector on host Pinned cudaMallocHost((void**)&h_a, bytes); cudaMallocHost((void**)&h_b, bytes); cudaMallocHost((void**)&h_c_add, bytes); cudaMallocHost((void**)&h_c_sub, bytes); cudaMallocHost((void**)&h_c_mult, bytes); cudaMallocHost((void**)&h_c_mod, bytes); // Allocate memory for each vector on GPU cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c_add, bytes); cudaMalloc(&d_c_sub, bytes); cudaMalloc(&d_c_mult, bytes); cudaMalloc(&d_c_mod, bytes); //initialize the input vectors for(int i = 0;i<totalThreads;i++){ //first array is 0 through number of threads h_a[i] = i; // second array is a random number between 0 and 3 h_b[i] = rand() % 4; } //printf the first 7 elements of input arrays printf("Array 1: "); for(int i = 0; i<7; i++){ printf("%d ", h_a[i]); } printf("\nArray 2: "); for(int i = 0; i<7; i++){ printf("%d ", h_b[i]); } printf("\n\n"); //copy both input arrays from host to device using cudaMemcpyToSymbol() (see profileCopiesHostToDevice) profileCopiesHostToDevice(d_a_const, h_a, d_b_const, h_b, bytes, "Constant"); //Perform arithmetic functions perform_add_const(numBlocks, totalThreads, d_c_add); perform_sub_const(numBlocks, totalThreads, d_c_sub); perform_mult_const(numBlocks, totalThreads, d_c_mult); perform_mod_const(numBlocks, totalThreads, d_c_mod); //copy the output arrays from device to host using 
cudaMemcyFromSymbol() profileCopiesDeviceToHost(h_c_add,d_c_add,h_c_sub, d_c_sub,h_c_mult, d_c_mult,h_c_mod, d_c_mod, bytes,"Constant"); // printf the first 7 elements of the results printf("Arithmetic Results: \n"); printf("Add: "); for(int i = 0; i<7; i++){ printf("%d ", h_c_add[i]); } printf("\nSubtract: "); for(int i = 0; i<7; i++){ printf("%d ", h_c_sub[i]); } printf("\nMultiply: "); for(int i = 0; i<7; i++){ printf("%d ", h_c_mult[i]); } printf("\nMod: "); for(int i = 0; i<7; i++){ printf("%d ", h_c_mod[i]); } printf("\n\n"); //free up space on our GPU cudaFree(d_a); cudaFree(d_b); cudaFree(d_c_add); cudaFree(d_c_sub); cudaFree(d_c_mult); cudaFree(d_c_add); //free up space on our CPU use cudaFreeHost since pinnned cudaFreeHost(h_a); cudaFreeHost(h_b); cudaFreeHost(h_c_add); cudaFreeHost(h_c_sub); cudaFreeHost(h_c_mult); cudaFreeHost(h_c_mod); } int main(int argc, char** argv) { int totalThreads = (1 << 10); int blockSize = 256; //User wants to run the Global vs Pinned Examples if( argc > 2 && argc < 4){ // Ensure the user supplies both number of threads and block size // otherwise use default values totalThreads = atoi(argv[1]); blockSize = atoi(argv[2]); } int numBlocks = totalThreads/blockSize; printf("\nUsing %d Threads and %d BlockSize\n",totalThreads, blockSize); // validate command line arguments if (totalThreads % blockSize != 0) { ++numBlocks; totalThreads = numBlocks*blockSize; printf("Warning: Total thread count is not evenly divisible by the block size\n"); printf("The total number of threads will be rounded up to %d\n", totalThreads); } // Lets see what we are working with and calculate the Amount of data we are transfering cudaDeviceProp prop; cudaGetDeviceProperties(&prop,0); const unsigned int bytes = totalThreads*sizeof(int); printf("\nDevice: %s\n", prop.name); printf("Transfer size (MB): %d\n\n", bytes * bytes / totalThreads); //Execute Pageable Arithmetic execute_arithmetic_sharedMem(totalThreads, numBlocks); //Execute The Pinned Arithmetic 
execute_arithmetic_constMem(totalThreads, numBlocks); return 0; }
//Based on the work of Andrew Krepps #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> //srand and rand #include <math.h> // Constant data declaration #define WORKSIZE 1024 // define a default worksize for constant data __device__ __constant__ int d_a_const[WORKSIZE]; __device__ __constant__ int d_b_const[WORKSIZE]; /* Profile functions. Taken and modified from https://devblogs.nvidia.com/how-optimize-data-transfers-cuda-cc/ */ void profileCopiesHostToDevice(int *d_a, int *h_a, int *d_b, int *h_b, const unsigned int bytes, const char *desc){ // events for timing hipEvent_t startEvent, stopEvent; hipEventCreate(&startEvent); hipEventCreate(&stopEvent); //start a recording event and execute the transfer afte hipEventRecord(startEvent, 0); // Use either cudaMemcpy or cudaMemcpyToSymbol depending on shared vs constant memory if(strcmp(desc, "Shared") == 0){ hipMemcpy( d_a, h_a, bytes, hipMemcpyHostToDevice); hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice); }else if(strcmp(desc, "Constant") == 0){ hipMemcpyToSymbol( HIP_SYMBOL(d_a_const), h_a, bytes,0, hipMemcpyHostToDevice); hipMemcpyToSymbol( HIP_SYMBOL(d_b_const), h_b, bytes,0, hipMemcpyHostToDevice); } hipEventRecord(stopEvent, 0); //stop hipEventSynchronize(stopEvent); float time; hipEventElapsedTime(&time, startEvent, stopEvent); printf("\nTransfers Host to Device Time Elaped: %f ms, Bandwidth (MB/s): %f\n\n", time, bytes * 1e-3 / time); // clean up events hipEventDestroy(startEvent); hipEventDestroy(stopEvent); } void profileCopiesDeviceToHost( int *h_c_add, int *d_c_add, int *h_c_sub, int *d_c_sub, int *h_c_mult, int *d_c_mult, int *h_c_mod, int *d_c_mod, const unsigned int bytes, const char *desc){ // events for timing hipEvent_t startEvent, stopEvent; hipEventCreate(&startEvent); hipEventCreate(&stopEvent); hipEventRecord(startEvent, 0); if(strcmp(desc,"Shared") == 0){ hipMemcpy( h_c_add, d_c_add, bytes, hipMemcpyDeviceToHost); hipMemcpy( h_c_sub, d_c_sub, bytes, hipMemcpyDeviceToHost); 
hipMemcpy( h_c_mult, d_c_mult, bytes, hipMemcpyDeviceToHost); hipMemcpy( h_c_mod, d_c_mod, bytes, hipMemcpyDeviceToHost); }else if(strcmp(desc,"Constant") == 0){ hipMemcpy( h_c_add, d_c_add, bytes, hipMemcpyDeviceToHost); hipMemcpy( h_c_sub, d_c_sub, bytes, hipMemcpyDeviceToHost); hipMemcpy( h_c_mult, d_c_mult, bytes, hipMemcpyDeviceToHost); hipMemcpy( h_c_mod, d_c_mod, bytes, hipMemcpyDeviceToHost); } hipEventRecord(stopEvent, 0); hipEventSynchronize(stopEvent); float time; hipEventElapsedTime(&time, startEvent, stopEvent); printf("\n%s transfers Device To Host Time Elaped: %f ms, Bandwidth (MB/s): %f\n\n",desc,time, bytes * 1e-3 / time); // clean up events hipEventDestroy(startEvent); hipEventDestroy(stopEvent); } /* Arithmetic Functions Using shared Memory */ // Add Function __global__ void add_shared(int *a, int *b, int *c, int n){ extern __shared__ int res[]; // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) res[threadIdx.x] = a[id] + b[id]; __syncthreads(); // wait for all threads in the block to finish c[threadIdx.x] = res[threadIdx.x];//since threads from different blocks cannot talk, use thread index instead } // subtract function __global__ void subtract_shared(int *a, int *b, int *c, int n){ extern __shared__ int res[]; // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) res[threadIdx.x] = a[id] - b[id]; __syncthreads(); // wait for all threads in the block to finish c[threadIdx.x] = res[threadIdx.x]; } // multiply function __global__ void mult_shared(int *a, int *b, int *c, int n){ extern __shared__ int res[]; // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) res[threadIdx.x] = a[id] * b[id]; __syncthreads(); // wait for all threads in the block to finish c[threadIdx.x] = res[threadIdx.x]; } // Moudulus function __global__ void 
mod_shared(int *a, int *b, int *c, int n){ extern __shared__ int res[]; // Get our global thread ID int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) res[threadIdx.x] = a[id] % b[id]; __syncthreads(); // wait for all threads in the block to finish c[threadIdx.x] = res[threadIdx.x]; } /* Arithmetic Functions Using Constant Memory */ // Add Function __global__ void add_const( int *c, int n){ // Get our global thread ID const unsigned int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds // use the constant data declared if (id < n) c[id] = d_a_const[id] + d_b_const[id]; } // subtract function __global__ void subtract_const(int *c, int n){ // Get our global thread ID const unsigned int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) c[id] = d_a_const[id] - d_b_const[id]; } // multiply function __global__ void mult_const(int *c, int n){ // Get our global thread ID const unsigned int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) c[id] = d_a_const[id] * d_b_const[id]; } // Moudulus function __global__ void mod_const(int *c, int n){ // Get our global thread ID const unsigned int id = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (id < n) c[id] = d_a_const[id] % d_b_const[id]; } /* Function calls to arithmetic functions using shared memory and timing */ void perform_add_shared(int numBlocks, int totalThreads, int *d_a, int *d_b, int *d_c_add){ float time; // events for timing hipEvent_t startEvent, stopEvent; hipEventCreate(&startEvent); hipEventCreate(&stopEvent); //start a recording event and execute the Kernels after hipEventRecord(startEvent, 0); //performing add function printf(" Performing Add function..."); add_shared<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_a, d_b, d_c_add, totalThreads); hipDeviceSynchronize(); hipEventRecord(stopEvent, 0); //stop 
hipEventSynchronize(stopEvent); hipEventElapsedTime(&time, startEvent, stopEvent); printf(" Elapsed Time: %f\n", time); // clean up events hipEventDestroy(startEvent); hipEventDestroy(stopEvent); } void perform_sub_shared(int numBlocks, int totalThreads, int *d_a,int *d_b,int *d_c_sub){ float time; // events for timing hipEvent_t startEvent, stopEvent; hipEventCreate(&startEvent); hipEventCreate(&stopEvent); //start a recording event and execute the Kernels after hipEventRecord(startEvent, 0); //performing subtract function printf(" Performing subtract function"); subtract_shared<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_a, d_b, d_c_sub, totalThreads); hipDeviceSynchronize(); hipEventRecord(stopEvent, 0); //stop hipEventSynchronize(stopEvent); hipEventElapsedTime(&time, startEvent, stopEvent); printf(" Elapsed Time: %f\n", time); // clean up events hipEventDestroy(startEvent); hipEventDestroy(stopEvent); } void perform_mult_shared(int numBlocks, int totalThreads, int *d_a,int *d_b,int *d_c_mult){ float time; // events for timing hipEvent_t startEvent, stopEvent; hipEventCreate(&startEvent); hipEventCreate(&stopEvent); //start a recording event and execute the Kernels after hipEventRecord(startEvent, 0); //performing mult function printf(" Performing mult function"); mult_shared<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_a, d_b, d_c_mult, totalThreads); hipDeviceSynchronize(); hipEventRecord(stopEvent, 0); //stop hipEventSynchronize(stopEvent); hipEventElapsedTime(&time, startEvent, stopEvent); printf(" Elapsed Time: %f\n", time); // clean up events hipEventDestroy(startEvent); hipEventDestroy(stopEvent); } void perform_mod_shared(int numBlocks, int totalThreads, int *d_a,int *d_b,int *d_c_mod){ float time; // events for timing hipEvent_t startEvent, stopEvent; hipEventCreate(&startEvent); hipEventCreate(&stopEvent); //start a recording event and execute the Kernels after hipEventRecord(startEvent, 0); //performing mod fuction printf(" 
Performing mod function"); mod_shared<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_a, d_b, d_c_mod, totalThreads); hipDeviceSynchronize(); hipEventRecord(stopEvent, 0); //stop hipEventSynchronize(stopEvent); hipEventElapsedTime(&time, startEvent, stopEvent); printf(" Elapsed Time: %f\n", time); // clean up events hipEventDestroy(startEvent); hipEventDestroy(stopEvent); } // Shared Memory Implementation function void execute_arithmetic_sharedMem(int totalThreads, int numBlocks){ printf("\t\t*****Executing Arithmetic Functions Using Shared Memory*****\n"); // Host input vectors int *h_a, *h_b; //Host output vectors for different functions "h_c_func" int *h_c_add,*h_c_sub,*h_c_mult,*h_c_mod; // Device input vectors int *d_a, *d_b; //Device output vector int *d_c_add,*d_c_sub,*d_c_mult,*d_c_mod; // Size, in bytes, of each vector const unsigned int bytes = totalThreads*sizeof(int); // Allocate memory for each vector on host Pinned hipHostMalloc((void**)&h_a, bytes, hipHostMallocDefault); hipHostMalloc((void**)&h_b, bytes, hipHostMallocDefault); hipHostMalloc((void**)&h_c_add, bytes, hipHostMallocDefault); hipHostMalloc((void**)&h_c_sub, bytes, hipHostMallocDefault); hipHostMalloc((void**)&h_c_mult, bytes, hipHostMallocDefault); hipHostMalloc((void**)&h_c_mod, bytes, hipHostMallocDefault); // Allocate memory for each vector on GPU hipMalloc(&d_a, bytes); hipMalloc(&d_b, bytes); hipMalloc(&d_c_add, bytes); hipMalloc(&d_c_sub, bytes); hipMalloc(&d_c_mult, bytes); hipMalloc(&d_c_mod, bytes); //initialize the input vectors for(int i = 0;i<totalThreads;i++){ //first array is 0 through number of threads h_a[i] = i; // second array is a random number between 0 and 3 h_b[i] = rand() % 4; } //printf the first 7 elements of input arrays printf("Array 1: "); for(int i = 0; i<7; i++){ printf("%d ", h_a[i]); } printf("\nArray 2: "); for(int i = 0; i<7; i++){ printf("%d ", h_b[i]); } printf("\n\n"); //copy both input arrays from host to device and profile it (see 
profileCopiesHostToDevice) profileCopiesHostToDevice(d_a, h_a, d_b, h_b, bytes, "Shared"); //Perform arithmetic functions perform_add_shared(numBlocks, totalThreads, d_a, d_b, d_c_add); perform_sub_shared(numBlocks, totalThreads, d_a, d_b, d_c_sub); perform_mult_shared(numBlocks, totalThreads, d_a, d_b, d_c_mult); perform_mod_shared(numBlocks, totalThreads, d_a, d_b, d_c_mod); //copy the output arrays from device to host profileCopiesDeviceToHost(h_c_add,d_c_add,h_c_sub, d_c_sub,h_c_mult, d_c_mult,h_c_mod, d_c_mod, bytes,"Shared"); // printf the first 7 elements of the results printf("Arithmetic Results: \n"); printf("Add: "); for(int i = 0; i<7; i++){ printf("%d ", h_c_add[i]); } printf("\nSubtract: "); for(int i = 0; i<7; i++){ printf("%d ", h_c_sub[i]); } printf("\nMultiply: "); for(int i = 0; i<7; i++){ printf("%d ", h_c_mult[i]); } printf("\nMultiply: "); for(int i = 0; i<7; i++){ printf("%d ", h_c_mod[i]); } printf("\n\n"); //free up space on our GPU hipFree(d_a); hipFree(d_b); hipFree(d_c_add); hipFree(d_c_sub); hipFree(d_c_mult); hipFree(d_c_add); //free up space on our CPU use cudaFreeHost since pinnned hipHostFree(h_a); hipHostFree(h_b); hipHostFree(h_c_add); hipHostFree(h_c_sub); hipHostFree(h_c_mult); hipHostFree(h_c_mod); } /* Function calls to arithmetic functions using constant memory */ void perform_add_const(int numBlocks, int totalThreads,int *d_c_add){ float time; // events for timing hipEvent_t startEvent, stopEvent; hipEventCreate(&startEvent); hipEventCreate(&stopEvent); //start a recording event and execute the Kernels after hipEventRecord(startEvent, 0); //performing add function printf(" Performing Add function..."); add_const<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_c_add, totalThreads); hipDeviceSynchronize(); hipEventRecord(stopEvent, 0); //stop hipEventSynchronize(stopEvent); hipEventElapsedTime(&time, startEvent, stopEvent); printf(" Elapsed Time: %f\n", time); // clean up events hipEventDestroy(startEvent); 
hipEventDestroy(stopEvent); } void perform_sub_const(int numBlocks, int totalThreads, int *d_c_sub){ float time; // events for timing hipEvent_t startEvent, stopEvent; hipEventCreate(&startEvent); hipEventCreate(&stopEvent); //start a recording event and execute the Kernels after hipEventRecord(startEvent, 0); //performing subtract function printf(" Performing subtract function"); subtract_const<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_c_sub, totalThreads); hipDeviceSynchronize(); hipEventRecord(stopEvent, 0); //stop hipEventSynchronize(stopEvent); hipEventElapsedTime(&time, startEvent, stopEvent); printf(" Elapsed Time: %f\n", time); // clean up events hipEventDestroy(startEvent); hipEventDestroy(stopEvent); } void perform_mult_const(int numBlocks, int totalThreads,int *d_c_mult){ float time; // events for timing hipEvent_t startEvent, stopEvent; hipEventCreate(&startEvent); hipEventCreate(&stopEvent); //start a recording event and execute the Kernels after hipEventRecord(startEvent, 0); //performing mult function printf(" Performing mult function"); mult_const<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>(d_c_mult, totalThreads); hipDeviceSynchronize(); hipEventRecord(stopEvent, 0); //stop hipEventSynchronize(stopEvent); hipEventElapsedTime(&time, startEvent, stopEvent); printf(" Elapsed Time: %f\n", time); // clean up events hipEventDestroy(startEvent); hipEventDestroy(stopEvent); } void perform_mod_const(int numBlocks, int totalThreads, int *d_c_mod){ float time; // events for timing hipEvent_t startEvent, stopEvent; hipEventCreate(&startEvent); hipEventCreate(&stopEvent); //start a recording event and execute the Kernels after hipEventRecord(startEvent, 0); //performing mod fuction printf(" Performing mod function"); mod_const<<<numBlocks, totalThreads, totalThreads*sizeof(int)>>>( d_c_mod, totalThreads); hipDeviceSynchronize(); hipEventRecord(stopEvent, 0); //stop hipEventSynchronize(stopEvent); hipEventElapsedTime(&time, startEvent, 
stopEvent); printf(" Elapsed Time: %f\n", time); // clean up events hipEventDestroy(startEvent); hipEventDestroy(stopEvent); } // Constant Memory Implementation void execute_arithmetic_constMem(int totalThreads, int numBlocks){ printf("\t\t*****Executing Arithmetic Functions Using Constant Memory*****\n"); // Host input vectors int *h_a, *h_b; //Host output vectors for different functions "h_c_func" int *h_c_add,*h_c_sub,*h_c_mult,*h_c_mod; // Device input vectors int *d_a, *d_b; //Device output vector int *d_c_add,*d_c_sub,*d_c_mult,*d_c_mod; // Size, in bytes, of each vector const unsigned int bytes = totalThreads*sizeof(int); // Allocate memory for each vector on host Pinned hipHostMalloc((void**)&h_a, bytes, hipHostMallocDefault); hipHostMalloc((void**)&h_b, bytes, hipHostMallocDefault); hipHostMalloc((void**)&h_c_add, bytes, hipHostMallocDefault); hipHostMalloc((void**)&h_c_sub, bytes, hipHostMallocDefault); hipHostMalloc((void**)&h_c_mult, bytes, hipHostMallocDefault); hipHostMalloc((void**)&h_c_mod, bytes, hipHostMallocDefault); // Allocate memory for each vector on GPU hipMalloc(&d_a, bytes); hipMalloc(&d_b, bytes); hipMalloc(&d_c_add, bytes); hipMalloc(&d_c_sub, bytes); hipMalloc(&d_c_mult, bytes); hipMalloc(&d_c_mod, bytes); //initialize the input vectors for(int i = 0;i<totalThreads;i++){ //first array is 0 through number of threads h_a[i] = i; // second array is a random number between 0 and 3 h_b[i] = rand() % 4; } //printf the first 7 elements of input arrays printf("Array 1: "); for(int i = 0; i<7; i++){ printf("%d ", h_a[i]); } printf("\nArray 2: "); for(int i = 0; i<7; i++){ printf("%d ", h_b[i]); } printf("\n\n"); //copy both input arrays from host to device using cudaMemcpyToSymbol() (see profileCopiesHostToDevice) profileCopiesHostToDevice(d_a_const, h_a, d_b_const, h_b, bytes, "Constant"); //Perform arithmetic functions perform_add_const(numBlocks, totalThreads, d_c_add); perform_sub_const(numBlocks, totalThreads, d_c_sub); 
perform_mult_const(numBlocks, totalThreads, d_c_mult); perform_mod_const(numBlocks, totalThreads, d_c_mod); //copy the output arrays from device to host using cudaMemcyFromSymbol() profileCopiesDeviceToHost(h_c_add,d_c_add,h_c_sub, d_c_sub,h_c_mult, d_c_mult,h_c_mod, d_c_mod, bytes,"Constant"); // printf the first 7 elements of the results printf("Arithmetic Results: \n"); printf("Add: "); for(int i = 0; i<7; i++){ printf("%d ", h_c_add[i]); } printf("\nSubtract: "); for(int i = 0; i<7; i++){ printf("%d ", h_c_sub[i]); } printf("\nMultiply: "); for(int i = 0; i<7; i++){ printf("%d ", h_c_mult[i]); } printf("\nMod: "); for(int i = 0; i<7; i++){ printf("%d ", h_c_mod[i]); } printf("\n\n"); //free up space on our GPU hipFree(d_a); hipFree(d_b); hipFree(d_c_add); hipFree(d_c_sub); hipFree(d_c_mult); hipFree(d_c_add); //free up space on our CPU use cudaFreeHost since pinnned hipHostFree(h_a); hipHostFree(h_b); hipHostFree(h_c_add); hipHostFree(h_c_sub); hipHostFree(h_c_mult); hipHostFree(h_c_mod); } int main(int argc, char** argv) { int totalThreads = (1 << 10); int blockSize = 256; //User wants to run the Global vs Pinned Examples if( argc > 2 && argc < 4){ // Ensure the user supplies both number of threads and block size // otherwise use default values totalThreads = atoi(argv[1]); blockSize = atoi(argv[2]); } int numBlocks = totalThreads/blockSize; printf("\nUsing %d Threads and %d BlockSize\n",totalThreads, blockSize); // validate command line arguments if (totalThreads % blockSize != 0) { ++numBlocks; totalThreads = numBlocks*blockSize; printf("Warning: Total thread count is not evenly divisible by the block size\n"); printf("The total number of threads will be rounded up to %d\n", totalThreads); } // Lets see what we are working with and calculate the Amount of data we are transfering hipDeviceProp_t prop; hipGetDeviceProperties(&prop,0); const unsigned int bytes = totalThreads*sizeof(int); printf("\nDevice: %s\n", prop.name); printf("Transfer size (MB): %d\n\n", 
bytes * bytes / totalThreads); //Execute Pageable Arithmetic execute_arithmetic_sharedMem(totalThreads, numBlocks); //Execute The Pinned Arithmetic execute_arithmetic_constMem(totalThreads, numBlocks); return 0; }
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Number of CUDA devices reported by the runtime (filled in by InitCUDA).
int count = 0;

// Probe the machine for CUDA-capable devices, announce what was found, and
// select the first device whose major compute capability is at least 1 as the
// calling thread's current device. Returns true on success, false when no
// device (or no CUDA 1.x-capable device) exists.
bool InitCUDA()
{
    printf("Start to detecte devices.........\n");

    cudaGetDeviceCount(&count);
    if (count == 0) {
        fprintf(stderr, "There is no device.\n");
        return false;
    }
    printf("%d device/s detected.\n", count);

    // Walk the devices until one with usable compute capability turns up.
    int dev;
    for (dev = 0; dev < count; dev++) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, dev) != cudaSuccess)
            continue; // property query failed; try the next device
        if (prop.major < 1)
            continue; // major compute capability below 1.x
        printf("Device %d: %s supportsCUDA %d.%d.\n",
               dev + 1, prop.name, prop.major, prop.minor);
        break; // found a usable device; dev holds its index
    }
    if (dev == count) {
        // Loop ran to completion without finding a CUDA 1.x device.
        fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
        return false;
    }

    cudaSetDevice(dev); // make it the current device for this thread
    return true;
}

int main()
{
    // Bail out quietly if initialization fails.
    if (!InitCUDA())
        return 0;
    printf("Hello GPU! CUDA has been initialized.\n");
    return 0;
}
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> //初始化CUDA int count=0; bool InitCUDA() { printf("Start to detecte devices.........\n");//显示检测到的设备数 cudaGetDeviceCount(&count);//检测计算能力大于等于1.0 的设备数 if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } printf("%d device/s detected.\n",count);//显示检测到的设备数 int i; for(i = 0; i < count; i++) {//依次验证检测到的设备是否支持CUDA cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) {//获得设备属性并验证是否正确 if(prop.major >= 1)//验证主计算能力,即计算能力的第一位数是否大于1 { printf("Device %d: %s supportsCUDA %d.%d.\n",i+1,prop.name,prop.major,prop.minor);//显示检测到的设备支持的CUDA 版本 break; } } } if(i == count) {//没有支持CUDA1.x 的设备 fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } cudaSetDevice(i);//设置设备为主叫线程的当前设备 return true; } int main() { if(!InitCUDA()) {//初始化失败返回系统int argc, char** argv return 0; } printf("Hello GPU! CUDA has been initialized.\n"); //exit(argc ? EXIT_SUCCESS : EXIT_FAILURE); return 0;//返回系统 }
.file "tmpxft_00150672_00000000-6_a.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "Start to detecte devices.........\n" .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "There is no device.\n" .LC2: .string "%d device/s detected.\n" .section .rodata.str1.8 .align 8 .LC3: .string "Device %d: %s supportsCUDA %d.%d.\n" .align 8 .LC4: .string "There is no device supporting CUDA 1.x.\n" .text .globl _Z8InitCUDAv .type _Z8InitCUDAv, @function _Z8InitCUDAv: .LFB2057: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $1048, %rsp .cfi_def_cfa_offset 1072 movq %fs:40, %rax movq %rax, 1032(%rsp) xorl %eax, %eax leaq .LC0(%rip), %rsi movl $2, %edi call __printf_chk@PLT leaq count(%rip), %rdi call cudaGetDeviceCount@PLT movl count(%rip), %edx testl %edx, %edx je .L14 leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT cmpl $0, count(%rip) jle .L11 movl $0, %ebx movq %rsp, %rbp jmp .L8 .L14: leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $0, %eax jmp .L3 .L7: addl $1, %ebx cmpl %ebx, count(%rip) jle .L6 .L8: movl %ebx, %esi movq %rbp, %rdi call cudaGetDeviceProperties_v2@PLT testl %eax, %eax jne .L7 movl 360(%rsp), %r8d testl %r8d, %r8d jle .L7 movq %rsp, %rcx leal 1(%rbx), %edx movl 364(%rsp), %r9d leaq .LC3(%rip), %rsi movl $2, %edi call __printf_chk@PLT .L6: cmpl %ebx, count(%rip) je .L15 movl %ebx, %edi call cudaSetDevice@PLT movl $1, %eax .L3: movq 1032(%rsp), %rdx subq %fs:40, %rdx jne .L16 addq 
$1048, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L11: .cfi_restore_state movl $0, %ebx jmp .L6 .L15: leaq .LC4(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $0, %eax jmp .L3 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z8InitCUDAv, .-_Z8InitCUDAv .section .rodata.str1.8 .align 8 .LC5: .string "Hello GPU! CUDA has been initialized.\n" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z8InitCUDAv testb %al, %al jne .L20 .L18: movl $0, %eax addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L20: .cfi_restore_state leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L18 .cfi_endproc .LFE2058: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .globl count .bss .align 4 .type count, @object .size count, 4 count: .zero 4 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 
2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> //初始化CUDA int count=0; bool InitCUDA() { printf("Start to detecte devices.........\n");//显示检测到的设备数 cudaGetDeviceCount(&count);//检测计算能力大于等于1.0 的设备数 if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } printf("%d device/s detected.\n",count);//显示检测到的设备数 int i; for(i = 0; i < count; i++) {//依次验证检测到的设备是否支持CUDA cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) {//获得设备属性并验证是否正确 if(prop.major >= 1)//验证主计算能力,即计算能力的第一位数是否大于1 { printf("Device %d: %s supportsCUDA %d.%d.\n",i+1,prop.name,prop.major,prop.minor);//显示检测到的设备支持的CUDA 版本 break; } } } if(i == count) {//没有支持CUDA1.x 的设备 fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } cudaSetDevice(i);//设置设备为主叫线程的当前设备 return true; } int main() { if(!InitCUDA()) {//初始化失败返回系统int argc, char** argv return 0; } printf("Hello GPU! CUDA has been initialized.\n"); //exit(argc ? EXIT_SUCCESS : EXIT_FAILURE); return 0;//返回系统 }
#include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> //初始化CUDA int count=0; bool InitCUDA() { printf("Start to detecte devices.........\n");//显示检测到的设备数 hipGetDeviceCount(&count);//检测计算能力大于等于1.0 的设备数 if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } printf("%d device/s detected.\n",count);//显示检测到的设备数 int i; for(i = 0; i < count; i++) {//依次验证检测到的设备是否支持CUDA hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop, i) == hipSuccess) {//获得设备属性并验证是否正确 if(prop.major >= 1)//验证主计算能力,即计算能力的第一位数是否大于1 { printf("Device %d: %s supportsCUDA %d.%d.\n",i+1,prop.name,prop.major,prop.minor);//显示检测到的设备支持的CUDA 版本 break; } } } if(i == count) {//没有支持CUDA1.x 的设备 fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } hipSetDevice(i);//设置设备为主叫线程的当前设备 return true; } int main() { if(!InitCUDA()) {//初始化失败返回系统int argc, char** argv return 0; } printf("Hello GPU! CUDA has been initialized.\n"); //exit(argc ? EXIT_SUCCESS : EXIT_FAILURE); return 0;//返回系统 }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> //初始化CUDA int count=0; bool InitCUDA() { printf("Start to detecte devices.........\n");//显示检测到的设备数 hipGetDeviceCount(&count);//检测计算能力大于等于1.0 的设备数 if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } printf("%d device/s detected.\n",count);//显示检测到的设备数 int i; for(i = 0; i < count; i++) {//依次验证检测到的设备是否支持CUDA hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop, i) == hipSuccess) {//获得设备属性并验证是否正确 if(prop.major >= 1)//验证主计算能力,即计算能力的第一位数是否大于1 { printf("Device %d: %s supportsCUDA %d.%d.\n",i+1,prop.name,prop.major,prop.minor);//显示检测到的设备支持的CUDA 版本 break; } } } if(i == count) {//没有支持CUDA1.x 的设备 fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } hipSetDevice(i);//设置设备为主叫线程的当前设备 return true; } int main() { if(!InitCUDA()) {//初始化失败返回系统int argc, char** argv return 0; } printf("Hello GPU! CUDA has been initialized.\n"); //exit(argc ? EXIT_SUCCESS : EXIT_FAILURE); return 0;//返回系统 }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> //初始化CUDA int count=0; bool InitCUDA() { printf("Start to detecte devices.........\n");//显示检测到的设备数 hipGetDeviceCount(&count);//检测计算能力大于等于1.0 的设备数 if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } printf("%d device/s detected.\n",count);//显示检测到的设备数 int i; for(i = 0; i < count; i++) {//依次验证检测到的设备是否支持CUDA hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop, i) == hipSuccess) {//获得设备属性并验证是否正确 if(prop.major >= 1)//验证主计算能力,即计算能力的第一位数是否大于1 { printf("Device %d: %s supportsCUDA %d.%d.\n",i+1,prop.name,prop.major,prop.minor);//显示检测到的设备支持的CUDA 版本 break; } } } if(i == count) {//没有支持CUDA1.x 的设备 fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } hipSetDevice(i);//设置设备为主叫线程的当前设备 return true; } int main() { if(!InitCUDA()) {//初始化失败返回系统int argc, char** argv return 0; } printf("Hello GPU! CUDA has been initialized.\n"); //exit(argc ? EXIT_SUCCESS : EXIT_FAILURE); return 0;//返回系统 }
.text .file "a.hip" .globl _Z8InitCUDAv # -- Begin function _Z8InitCUDAv .p2align 4, 0x90 .type _Z8InitCUDAv,@function _Z8InitCUDAv: # @_Z8InitCUDAv .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $1472, %rsp # imm = 0x5C0 .cfi_def_cfa_offset 1504 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl $.Lstr, %edi callq puts@PLT movl $count, %edi callq hipGetDeviceCount movl count(%rip), %esi testl %esi, %esi je .LBB0_1 # %bb.3: xorl %ebx, %ebx movl $.L.str.2, %edi xorl %eax, %eax callq printf cmpl $0, count(%rip) jle .LBB0_10 # %bb.4: # %.lr.ph movl $1, %r14d movq %rsp, %r15 jmp .LBB0_5 .p2align 4, 0x90 .LBB0_8: # %.critedge # in Loop: Header=BB0_5 Depth=1 leal 1(%r14), %ebx cmpl count(%rip), %r14d movl %ebx, %r14d jge .LBB0_9 .LBB0_5: # =>This Inner Loop Header: Depth=1 leal -1(%r14), %ebx movq %r15, %rdi movl %ebx, %esi callq hipGetDevicePropertiesR0600 testl %eax, %eax jne .LBB0_8 # %bb.6: # in Loop: Header=BB0_5 Depth=1 movl 360(%rsp), %ecx testl %ecx, %ecx jle .LBB0_8 # %bb.7: movl 364(%rsp), %r8d movq %rsp, %rdx movl $.L.str.3, %edi movl %r14d, %esi xorl %eax, %eax callq printf .LBB0_10: # %.loopexit cmpl count(%rip), %ebx je .LBB0_11 .LBB0_12: movl %ebx, %edi callq hipSetDevice movb $1, %al .LBB0_13: # kill: def $al killed $al killed $eax addq $1472, %rsp # imm = 0x5C0 .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .LBB0_9: # %.loopexit.loopexit .cfi_def_cfa_offset 1504 decl %ebx cmpl count(%rip), %ebx jne .LBB0_12 .LBB0_11: movq stderr(%rip), %rcx movl $.L.str.4, %edi movl $40, %esi jmp .LBB0_2 .LBB0_1: movq stderr(%rip), %rcx movl $.L.str.1, %edi movl $20, %esi .LBB0_2: movl $1, %edx callq fwrite@PLT xorl %eax, %eax jmp .LBB0_13 .Lfunc_end0: .size _Z8InitCUDAv, .Lfunc_end0-_Z8InitCUDAv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type 
main,@function main: # @main .cfi_startproc # %bb.0: pushq %rax .cfi_def_cfa_offset 16 callq _Z8InitCUDAv testb %al, %al je .LBB1_2 # %bb.1: movl $.Lstr.1, %edi callq puts@PLT .LBB1_2: xorl %eax, %eax popq %rcx .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .type count,@object # @count .bss .globl count .p2align 2, 0x0 count: .long 0 # 0x0 .size count, 4 .type .L.str.1,@object # @.str.1 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.1: .asciz "There is no device.\n" .size .L.str.1, 21 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "%d device/s detected.\n" .size .L.str.2, 23 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "Device %d: %s supportsCUDA %d.%d.\n" .size .L.str.3, 35 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "There is no device supporting CUDA 1.x.\n" .size .L.str.4, 41 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Start to detecte devices........." .size .Lstr, 34 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "Hello GPU! CUDA has been initialized." .size .Lstr.1, 38 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym count .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00150672_00000000-6_a.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2061: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2061: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "Start to detecte devices.........\n" .section .rodata.str1.1,"aMS",@progbits,1 .LC1: .string "There is no device.\n" .LC2: .string "%d device/s detected.\n" .section .rodata.str1.8 .align 8 .LC3: .string "Device %d: %s supportsCUDA %d.%d.\n" .align 8 .LC4: .string "There is no device supporting CUDA 1.x.\n" .text .globl _Z8InitCUDAv .type _Z8InitCUDAv, @function _Z8InitCUDAv: .LFB2057: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $1048, %rsp .cfi_def_cfa_offset 1072 movq %fs:40, %rax movq %rax, 1032(%rsp) xorl %eax, %eax leaq .LC0(%rip), %rsi movl $2, %edi call __printf_chk@PLT leaq count(%rip), %rdi call cudaGetDeviceCount@PLT movl count(%rip), %edx testl %edx, %edx je .L14 leaq .LC2(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT cmpl $0, count(%rip) jle .L11 movl $0, %ebx movq %rsp, %rbp jmp .L8 .L14: leaq .LC1(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $0, %eax jmp .L3 .L7: addl $1, %ebx cmpl %ebx, count(%rip) jle .L6 .L8: movl %ebx, %esi movq %rbp, %rdi call cudaGetDeviceProperties_v2@PLT testl %eax, %eax jne .L7 movl 360(%rsp), %r8d testl %r8d, %r8d jle .L7 movq %rsp, %rcx leal 1(%rbx), %edx movl 364(%rsp), %r9d leaq .LC3(%rip), %rsi movl $2, %edi call __printf_chk@PLT .L6: cmpl %ebx, count(%rip) je .L15 movl %ebx, %edi call cudaSetDevice@PLT movl $1, %eax .L3: movq 1032(%rsp), %rdx subq %fs:40, %rdx jne .L16 addq 
$1048, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L11: .cfi_restore_state movl $0, %ebx jmp .L6 .L15: leaq .LC4(%rip), %rdx movl $2, %esi movq stderr(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $0, %eax jmp .L3 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2057: .size _Z8InitCUDAv, .-_Z8InitCUDAv .section .rodata.str1.8 .align 8 .LC5: .string "Hello GPU! CUDA has been initialized.\n" .text .globl main .type main, @function main: .LFB2058: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z8InitCUDAv testb %al, %al jne .L20 .L18: movl $0, %eax addq $8, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L20: .cfi_restore_state leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L18 .cfi_endproc .LFE2058: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2084: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2084: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .globl count .bss .align 4 .type count, @object .size count, 4 count: .zero 4 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 
2f 2: .long 0x3 3: .align 8 4:
.text .file "a.hip" .globl _Z8InitCUDAv # -- Begin function _Z8InitCUDAv .p2align 4, 0x90 .type _Z8InitCUDAv,@function _Z8InitCUDAv: # @_Z8InitCUDAv .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $1472, %rsp # imm = 0x5C0 .cfi_def_cfa_offset 1504 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 movl $.Lstr, %edi callq puts@PLT movl $count, %edi callq hipGetDeviceCount movl count(%rip), %esi testl %esi, %esi je .LBB0_1 # %bb.3: xorl %ebx, %ebx movl $.L.str.2, %edi xorl %eax, %eax callq printf cmpl $0, count(%rip) jle .LBB0_10 # %bb.4: # %.lr.ph movl $1, %r14d movq %rsp, %r15 jmp .LBB0_5 .p2align 4, 0x90 .LBB0_8: # %.critedge # in Loop: Header=BB0_5 Depth=1 leal 1(%r14), %ebx cmpl count(%rip), %r14d movl %ebx, %r14d jge .LBB0_9 .LBB0_5: # =>This Inner Loop Header: Depth=1 leal -1(%r14), %ebx movq %r15, %rdi movl %ebx, %esi callq hipGetDevicePropertiesR0600 testl %eax, %eax jne .LBB0_8 # %bb.6: # in Loop: Header=BB0_5 Depth=1 movl 360(%rsp), %ecx testl %ecx, %ecx jle .LBB0_8 # %bb.7: movl 364(%rsp), %r8d movq %rsp, %rdx movl $.L.str.3, %edi movl %r14d, %esi xorl %eax, %eax callq printf .LBB0_10: # %.loopexit cmpl count(%rip), %ebx je .LBB0_11 .LBB0_12: movl %ebx, %edi callq hipSetDevice movb $1, %al .LBB0_13: # kill: def $al killed $al killed $eax addq $1472, %rsp # imm = 0x5C0 .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .LBB0_9: # %.loopexit.loopexit .cfi_def_cfa_offset 1504 decl %ebx cmpl count(%rip), %ebx jne .LBB0_12 .LBB0_11: movq stderr(%rip), %rcx movl $.L.str.4, %edi movl $40, %esi jmp .LBB0_2 .LBB0_1: movq stderr(%rip), %rcx movl $.L.str.1, %edi movl $20, %esi .LBB0_2: movl $1, %edx callq fwrite@PLT xorl %eax, %eax jmp .LBB0_13 .Lfunc_end0: .size _Z8InitCUDAv, .Lfunc_end0-_Z8InitCUDAv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type 
main,@function main: # @main .cfi_startproc # %bb.0: pushq %rax .cfi_def_cfa_offset 16 callq _Z8InitCUDAv testb %al, %al je .LBB1_2 # %bb.1: movl $.Lstr.1, %edi callq puts@PLT .LBB1_2: xorl %eax, %eax popq %rcx .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .type count,@object # @count .bss .globl count .p2align 2, 0x0 count: .long 0 # 0x0 .size count, 4 .type .L.str.1,@object # @.str.1 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.1: .asciz "There is no device.\n" .size .L.str.1, 21 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "%d device/s detected.\n" .size .L.str.2, 23 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "Device %d: %s supportsCUDA %d.%d.\n" .size .L.str.3, 35 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "There is no device supporting CUDA 1.x.\n" .size .L.str.4, 41 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Start to detecte devices........." .size .Lstr, 34 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "Hello GPU! CUDA has been initialized." .size .Lstr.1, 38 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym count .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <iostream> using namespace std; __global__ void kernel(){ //it does nothing for now }; int main(){ kernel<<<1,1>>>(); cout<<"Hello, world! \n"; return 0; }
code for sm_80 Function : _Z6kernelv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <iostream> using namespace std; __global__ void kernel(){ //it does nothing for now }; int main(){ kernel<<<1,1>>>(); cout<<"Hello, world! \n"; return 0; }
.file "tmpxft_000dc642_00000000-6_hello.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3672: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3672: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z24__device_stub__Z6kernelvv .type _Z24__device_stub__Z6kernelvv, @function _Z24__device_stub__Z6kernelvv: .LFB3694: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z6kernelv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE3694: .size _Z24__device_stub__Z6kernelvv, .-_Z24__device_stub__Z6kernelvv .globl _Z6kernelv .type _Z6kernelv, @function _Z6kernelv: .LFB3695: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z24__device_stub__Z6kernelvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3695: .size _Z6kernelv, .-_Z6kernelv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Hello, world! 
\n" .text .globl main .type main, @function main: .LFB3669: .cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 8(%rsp) movl $1, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L12: leaq .LC0(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state call _Z24__device_stub__Z6kernelvv jmp .L12 .cfi_endproc .LFE3669: .size main, .-main .section .rodata.str1.1 .LC1: .string "_Z6kernelv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3697: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z6kernelv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3697: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section 
.note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <iostream> using namespace std; __global__ void kernel(){ //it does nothing for now }; int main(){ kernel<<<1,1>>>(); cout<<"Hello, world! \n"; return 0; }
#include <hip/hip_runtime.h> #include <iostream> using namespace std; __global__ void kernel(){ //it does nothing for now }; int main(){ kernel<<<1,1>>>(); cout<<"Hello, world! \n"; return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <iostream> using namespace std; __global__ void kernel(){ //it does nothing for now }; int main(){ kernel<<<1,1>>>(); cout<<"Hello, world! \n"; return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6kernelv .globl _Z6kernelv .p2align 8 .type _Z6kernelv,@function _Z6kernelv: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6kernelv .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 0 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 0 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 1 .amdhsa_next_free_sgpr 1 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z6kernelv, .Lfunc_end0-_Z6kernelv .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: [] 
.group_segment_fixed_size: 0 .kernarg_segment_align: 4 .kernarg_segment_size: 0 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6kernelv .private_segment_fixed_size: 0 .sgpr_count: 0 .sgpr_spill_count: 0 .symbol: _Z6kernelv.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 0 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <iostream> using namespace std; __global__ void kernel(){ //it does nothing for now }; int main(){ kernel<<<1,1>>>(); cout<<"Hello, world! \n"; return 0; }
.text .file "hello.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z21__device_stub__kernelv # -- Begin function _Z21__device_stub__kernelv .p2align 4, 0x90 .type _Z21__device_stub__kernelv,@function _Z21__device_stub__kernelv: # @_Z21__device_stub__kernelv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z6kernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z21__device_stub__kernelv, .Lfunc_end0-_Z21__device_stub__kernelv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 movabsq $4294967297, %rdi # imm = 0x100000001 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z6kernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: movl $_ZSt4cout, %edi movl $.L.str, %esi movl $15, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # 
@__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6kernelv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z6kernelv,@object # @_Z6kernelv .section .rodata,"a",@progbits .globl _Z6kernelv .p2align 3, 0x0 _Z6kernelv: .quad _Z21__device_stub__kernelv .size _Z6kernelv, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Hello, world! 
\n" .size .L.str, 16 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z6kernelv" .size .L__unnamed_1, 11 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__kernelv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6kernelv .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z6kernelv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0020*/ BRA 0x20; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0030*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0040*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0050*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0060*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0070*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0080*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0090*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z6kernelv .globl _Z6kernelv .p2align 8 .type _Z6kernelv,@function _Z6kernelv: s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z6kernelv .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 0 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 0 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 1 .amdhsa_next_free_sgpr 1 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z6kernelv, .Lfunc_end0-_Z6kernelv .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: [] 
.group_segment_fixed_size: 0 .kernarg_segment_align: 4 .kernarg_segment_size: 0 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z6kernelv .private_segment_fixed_size: 0 .sgpr_count: 0 .sgpr_spill_count: 0 .symbol: _Z6kernelv.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 0 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000dc642_00000000-6_hello.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB3672: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3672: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z24__device_stub__Z6kernelvv .type _Z24__device_stub__Z6kernelvv, @function _Z24__device_stub__Z6kernelvv: .LFB3694: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z6kernelv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE3694: .size _Z24__device_stub__Z6kernelvv, .-_Z24__device_stub__Z6kernelvv .globl _Z6kernelv .type _Z6kernelv, @function _Z6kernelv: .LFB3695: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z24__device_stub__Z6kernelvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3695: .size _Z6kernelv, .-_Z6kernelv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Hello, world! 
\n" .text .globl main .type main, @function main: .LFB3669: .cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 8(%rsp) movl $1, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L12: leaq .LC0(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state call _Z24__device_stub__Z6kernelvv jmp .L12 .cfi_endproc .LFE3669: .size main, .-main .section .rodata.str1.1 .LC1: .string "_Z6kernelv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB3697: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z6kernelv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE3697: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section 
.note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "hello.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z21__device_stub__kernelv # -- Begin function _Z21__device_stub__kernelv .p2align 4, 0x90 .type _Z21__device_stub__kernelv,@function _Z21__device_stub__kernelv: # @_Z21__device_stub__kernelv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z6kernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z21__device_stub__kernelv, .Lfunc_end0-_Z21__device_stub__kernelv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 movabsq $4294967297, %rdi # imm = 0x100000001 movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z6kernelv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: movl $_ZSt4cout, %edi movl $.L.str, %esi movl $15, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # 
@__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z6kernelv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z6kernelv,@object # @_Z6kernelv .section .rodata,"a",@progbits .globl _Z6kernelv .p2align 3, 0x0 _Z6kernelv: .quad _Z21__device_stub__kernelv .size _Z6kernelv, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Hello, world! 
\n" .size .L.str, 16 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z6kernelv" .size .L__unnamed_1, 11 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z21__device_stub__kernelv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z6kernelv .addrsig_sym _ZSt4cout .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __global__ void PositiveDefiniteKernel( char *hessian_pd, float *hessian, int imageW, int imageH, int imageD ) { const int baseX = blockIdx.x * PD_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * PD_BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * PD_BLOCKDIM_Z + threadIdx.z; const int size = imageW * imageH * imageD; const int idx = (baseZ * imageH + baseY) * imageW + baseX; float xx = hessian[idx]; float xy = hessian[idx + size]; float xz = hessian[idx + size*2]; float yy = hessian[idx + size*3]; float yz = hessian[idx + size*4]; float zz = hessian[idx + size*5]; // Sylvester's criterion hessian_pd[idx] = ( xx < 0 && xx*yy-xy*xy > 0 && xx*yy*zz + 2*xy*yz*xz - xx*yz*yz - yy*xz*xz - zz*xy*xy < 0 ) ? 1 : 0; }
code for sm_80 Function : _Z22PositiveDefiniteKernelPcPfiii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */ /* 0x000e220000002600 */ /*0020*/ MOV R6, c[0x0][0x174] ; /* 0x00005d0000067a02 */ /* 0x000fe20000000f00 */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */ /* 0x000e280000002200 */ /*0050*/ S2R R4, SR_CTAID.Z ; /* 0x0000000000047919 */ /* 0x000e680000002700 */ /*0060*/ S2R R7, SR_TID.Z ; /* 0x0000000000077919 */ /* 0x000e680000002300 */ /*0070*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000ea80000002500 */ /*0080*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000ea20000002100 */ /*0090*/ LEA R2, R2, R3, 0x3 ; /* 0x0000000302027211 */ /* 0x001fc400078e18ff */ /*00a0*/ LEA R3, R4, R7, 0x3 ; /* 0x0000000704037211 */ /* 0x002fe200078e18ff */ /*00b0*/ IMAD R4, R6, c[0x0][0x170], RZ ; /* 0x00005c0006047a24 */ /* 0x000fc800078e02ff */ /*00c0*/ IMAD R3, R3, c[0x0][0x174], R2 ; /* 0x00005d0003037a24 */ /* 0x000fe400078e0202 */ /*00d0*/ IMAD R13, R4, c[0x0][0x178], RZ ; /* 0x00005e00040d7a24 */ /* 0x000fe200078e02ff */ /*00e0*/ LEA R0, R0, R5, 0x3 ; /* 0x0000000500007211 */ /* 0x004fe200078e18ff */ /*00f0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fc800000001ff */ /*0100*/ IMAD R0, R3, c[0x0][0x170], R0 ; /* 0x00005c0003007a24 */ /* 0x000fca00078e0200 */ /*0110*/ IADD3 R2, R13, R0, R13 ; /* 0x000000000d027210 */ /* 0x000fca0007ffe00d */ /*0120*/ IMAD.WIDE R2, R2, R5, c[0x0][0x168] ; /* 0x00005a0002027625 */ /* 0x000fc800078e0205 */ /*0130*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */ /* 0x000fc800078e0205 */ /*0140*/ IMAD.WIDE R8, R13.reuse, 0x4, R2 ; /* 0x000000040d087825 */ /* 0x040fe200078e0202 */ /*0150*/ 
LDG.E R14, [R4.64] ; /* 0x00000004040e7981 */ /* 0x000ea6000c1e1900 */ /*0160*/ IMAD.WIDE R6, R13, 0x4, R4 ; /* 0x000000040d067825 */ /* 0x000fe200078e0204 */ /*0170*/ LDG.E R15, [R8.64] ; /* 0x00000004080f7981 */ /* 0x000eaa000c1e1900 */ /*0180*/ LDG.E R7, [R6.64] ; /* 0x0000000406077981 */ /* 0x000ee2000c1e1900 */ /*0190*/ FMUL R16, R14, R15 ; /* 0x0000000f0e107220 */ /* 0x004fc80000400000 */ /*01a0*/ FFMA R10, -R7, R7, R16 ; /* 0x00000007070a7223 */ /* 0x008fca0000000110 */ /*01b0*/ FSETP.GT.AND P0, PT, R10, RZ, PT ; /* 0x000000ff0a00720b */ /* 0x000fc80003f04000 */ /*01c0*/ FSETP.GEU.OR P0, PT, R14, RZ, !P0 ; /* 0x000000ff0e00720b */ /* 0x000fda000470e400 */ /*01d0*/ @!P0 IMAD.WIDE R10, R13.reuse, 0x4, R8 ; /* 0x000000040d0a8825 */ /* 0x040fe200078e0208 */ /*01e0*/ @!P0 LDG.E R2, [R2.64] ; /* 0x0000000402028981 */ /* 0x000ea8000c1e1900 */ /*01f0*/ @!P0 LDG.E R17, [R10.64] ; /* 0x000000040a118981 */ /* 0x000ee2000c1e1900 */ /*0200*/ @!P0 IMAD.WIDE R12, R13, 0x4, R10 ; /* 0x000000040d0c8825 */ /* 0x000fcc00078e020a */ /*0210*/ @!P0 LDG.E R12, [R12.64] ; /* 0x000000040c0c8981 */ /* 0x000f22000c1e1900 */ /*0220*/ @!P0 FADD R4, R7, R7 ; /* 0x0000000707048221 */ /* 0x000fc80000000000 */ /*0230*/ @!P0 FMUL R5, R17, R4 ; /* 0x0000000411058220 */ /* 0x008fc80000400000 */ /*0240*/ @!P0 FMUL R5, R2, R5 ; /* 0x0000000502058220 */ /* 0x004fe40000400000 */ /*0250*/ @!P0 FMUL R14, R14, R17 ; /* 0x000000110e0e8220 */ /* 0x000fe40000400000 */ /*0260*/ @!P0 FFMA R16, R16, R12, R5 ; /* 0x0000000c10108223 */ /* 0x010fe40000000005 */ /*0270*/ @!P0 FMUL R15, R15, R2 ; /* 0x000000020f0f8220 */ /* 0x000fe40000400000 */ /*0280*/ @!P0 FFMA R17, -R17, R14, R16 ; /* 0x0000000e11118223 */ /* 0x000fe40000000110 */ /*0290*/ @!P0 FMUL R5, R7, R12 ; /* 0x0000000c07058220 */ /* 0x000fc40000400000 */ /*02a0*/ @!P0 FFMA R4, -R2, R15, R17 ; /* 0x0000000f02048223 */ /* 0x000fc80000000111 */ /*02b0*/ @!P0 FFMA R4, -R7, R5, R4 ; /* 0x0000000507048223 */ /* 0x000fe20000000104 */ /*02c0*/ IADD3 R2, P2, 
R0, c[0x0][0x160], RZ ; /* 0x0000580000027a10 */ /* 0x000fc80007f5e0ff */ /*02d0*/ @!P0 FSETP.GEU.AND P1, PT, R4, RZ, PT ; /* 0x000000ff0400820b */ /* 0x000fe40003f2e000 */ /*02e0*/ PRMT R5, RZ, 0x7610, R5 ; /* 0x00007610ff057816 */ /* 0x000fe40000000005 */ /*02f0*/ LEA.HI.X.SX32 R3, R0, c[0x0][0x164], 0x1, P2 ; /* 0x0000590000037a11 */ /* 0x000fe400010f0eff */ /*0300*/ @!P0 SEL R5, RZ, 0x1, P1 ; /* 0x00000001ff058807 */ /* 0x000fca0000800000 */ /*0310*/ STG.E.U8 [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101104 */ /*0320*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0330*/ BRA 0x330; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0380*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0390*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void PositiveDefiniteKernel( char *hessian_pd, float *hessian, int imageW, int imageH, int imageD ) { const int baseX = blockIdx.x * PD_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * PD_BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * PD_BLOCKDIM_Z + threadIdx.z; const int size = imageW * imageH * imageD; const int idx = (baseZ * imageH + baseY) * imageW + baseX; float xx = hessian[idx]; float xy = hessian[idx + size]; float xz = hessian[idx + size*2]; float yy = hessian[idx + size*3]; float yz = hessian[idx + size*4]; float zz = hessian[idx + size*5]; // Sylvester's criterion hessian_pd[idx] = ( xx < 0 && xx*yy-xy*xy > 0 && xx*yy*zz + 2*xy*yz*xz - xx*yz*yz - yy*xz*xz - zz*xy*xy < 0 ) ? 1 : 0; }
.file "tmpxft_00076eab_00000000-6_PositiveDefiniteKernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z47__device_stub__Z22PositiveDefiniteKernelPcPfiiiPcPfiii .type _Z47__device_stub__Z22PositiveDefiniteKernelPcPfiiiPcPfiii, @function _Z47__device_stub__Z22PositiveDefiniteKernelPcPfiiiPcPfiii: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movl %r8d, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) leaq 4(%rsp), %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z22PositiveDefiniteKernelPcPfiii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z47__device_stub__Z22PositiveDefiniteKernelPcPfiiiPcPfiii, .-_Z47__device_stub__Z22PositiveDefiniteKernelPcPfiiiPcPfiii .globl _Z22PositiveDefiniteKernelPcPfiii 
.type _Z22PositiveDefiniteKernelPcPfiii, @function _Z22PositiveDefiniteKernelPcPfiii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z47__device_stub__Z22PositiveDefiniteKernelPcPfiiiPcPfiii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z22PositiveDefiniteKernelPcPfiii, .-_Z22PositiveDefiniteKernelPcPfiii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z22PositiveDefiniteKernelPcPfiii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z22PositiveDefiniteKernelPcPfiii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void PositiveDefiniteKernel( char *hessian_pd, float *hessian, int imageW, int imageH, int imageD ) { const int baseX = blockIdx.x * PD_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * PD_BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * PD_BLOCKDIM_Z + threadIdx.z; const int size = imageW * imageH * imageD; const int idx = (baseZ * imageH + baseY) * imageW + baseX; float xx = hessian[idx]; float xy = hessian[idx + size]; float xz = hessian[idx + size*2]; float yy = hessian[idx + size*3]; float yz = hessian[idx + size*4]; float zz = hessian[idx + size*5]; // Sylvester's criterion hessian_pd[idx] = ( xx < 0 && xx*yy-xy*xy > 0 && xx*yy*zz + 2*xy*yz*xz - xx*yz*yz - yy*xz*xz - zz*xy*xy < 0 ) ? 1 : 0; }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void PositiveDefiniteKernel( char *hessian_pd, float *hessian, int imageW, int imageH, int imageD ) { const int baseX = blockIdx.x * PD_BLOCKDIM_X + threadIdx.x; const int baseY = blockIdx.y * PD_BLOCKDIM_Y + threadIdx.y; const int baseZ = blockIdx.z * PD_BLOCKDIM_Z + threadIdx.z; const int size = imageW * imageH * imageD; const int idx = (baseZ * imageH + baseY) * imageW + baseX; float xx = hessian[idx]; float xy = hessian[idx + size]; float xz = hessian[idx + size*2]; float yy = hessian[idx + size*3]; float yz = hessian[idx + size*4]; float zz = hessian[idx + size*5]; // Sylvester's criterion hessian_pd[idx] = ( xx < 0 && xx*yy-xy*xy > 0 && xx*yy*zz + 2*xy*yz*xz - xx*yz*yz - yy*xz*xz - zz*xy*xy < 0 ) ? 1 : 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h>
#include "includes.h"

/*
 * For each voxel of a 3-D image, evaluate Sylvester's criterion on the
 * 3x3 symmetric Hessian stored as six component planes (xx, xy, xz, yy,
 * yz, zz, each imageW*imageH*imageD floats) and write a 0/1 flag into
 * hessian_pd.  One thread handles exactly one voxel; the launch uses a
 * 3-D grid of (PD_BLOCKDIM_X, PD_BLOCKDIM_Y, PD_BLOCKDIM_Z) blocks
 * (block dims defined in includes.h).  No bounds check is performed, so
 * the image dimensions must be multiples of the block dimensions.
 */
__global__ void PositiveDefiniteKernel( char *hessian_pd, float *hessian, int imageW, int imageH, int imageD )
{
    const int gx = blockIdx.x * PD_BLOCKDIM_X + threadIdx.x;
    const int gy = blockIdx.y * PD_BLOCKDIM_Y + threadIdx.y;
    const int gz = blockIdx.z * PD_BLOCKDIM_Z + threadIdx.z;

    const int plane = imageW * imageH * imageD;     /* floats per Hessian component */
    const int voxel = (gz * imageH + gy) * imageW + gx;

    const float h_xx = hessian[voxel];
    const float h_xy = hessian[voxel + plane];
    const float h_xz = hessian[voxel + plane * 2];
    const float h_yy = hessian[voxel + plane * 3];
    const float h_yz = hessian[voxel + plane * 4];
    const float h_zz = hessian[voxel + plane * 5];

    /* Leading principal minors for Sylvester's criterion. */
    const float minor2 = h_xx * h_yy - h_xy * h_xy;
    const float minor3 = h_xx * h_yy * h_zz + 2 * h_xy * h_yz * h_xz
                       - h_xx * h_yz * h_yz - h_yy * h_xz * h_xz - h_zz * h_xy * h_xy;

    char flag = 0;
    if (h_xx < 0 && minor2 > 0 && minor3 < 0)
        flag = 1;
    hessian_pd[voxel] = flag;
}
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z22PositiveDefiniteKernelPcPfiii .globl _Z22PositiveDefiniteKernelPcPfiii .p2align 8 .type _Z22PositiveDefiniteKernelPcPfiii,@function _Z22PositiveDefiniteKernelPcPfiii: s_load_b128 s[4:7], s[0:1], 0x8 v_bfe_u32 v1, v0, 20, 10 v_bfe_u32 v2, v0, 10, 10 s_lshl_b32 s2, s14, 3 v_and_b32_e32 v0, 0x3ff, v0 s_mov_b32 s3, 0 v_lshl_add_u32 v1, s15, 3, v1 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v1, v1, s7 v_add3_u32 v1, s2, v2, v1 s_lshl_b32 s2, s13, 3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v1, v1, s6 v_add3_u32 v0, s2, v0, v1 s_mov_b32 s2, exec_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v1, 31, v0 v_lshlrev_b64 v[2:3], 2, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s4, v2 v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo global_load_b32 v2, v[2:3], off s_waitcnt vmcnt(0) v_cmpx_gt_f32_e32 0, v2 s_cbranch_execz .LBB0_4 s_load_b32 s3, s[0:1], 0x18 s_mul_i32 s6, s7, s6 s_mov_b32 s7, 0 s_waitcnt lgkmcnt(0) s_mul_i32 s6, s6, s3 s_mov_b32 s3, exec_lo v_mad_u64_u32 v[3:4], null, s6, 3, v[0:1] v_add_nc_u32_e32 v5, s6, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_ashrrev_i32_e32 v6, 31, v5 v_ashrrev_i32_e32 v4, 31, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[5:6], 2, v[5:6] v_lshlrev_b64 v[3:4], 2, v[3:4] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v3, vcc_lo, s4, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo s_delay_alu instid0(VALU_DEP_4) v_add_co_u32 v5, vcc_lo, s4, v5 v_add_co_ci_u32_e32 v6, vcc_lo, s5, v6, vcc_lo s_clause 0x1 global_load_b32 v4, v[3:4], off global_load_b32 v3, v[5:6], off s_waitcnt vmcnt(1) v_mul_f32_e32 v5, v2, v4 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) | 
instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f32 v6, -v3, v3, v5 v_cmpx_lt_f32_e32 0, v6 s_cbranch_execz .LBB0_3 v_lshl_add_u32 v6, s6, 2, v0 v_lshl_add_u32 v8, s6, 1, v0 v_mad_u64_u32 v[10:11], null, s6, 5, v[0:1] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_ashrrev_i32_e32 v7, 31, v6 v_ashrrev_i32_e32 v9, 31, v8 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_ashrrev_i32_e32 v11, 31, v10 v_lshlrev_b64 v[6:7], 2, v[6:7] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_lshlrev_b64 v[8:9], 2, v[8:9] v_lshlrev_b64 v[10:11], 2, v[10:11] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v6, vcc_lo, s4, v6 v_add_co_ci_u32_e32 v7, vcc_lo, s5, v7, vcc_lo s_delay_alu instid0(VALU_DEP_4) v_add_co_u32 v8, vcc_lo, s4, v8 v_add_co_ci_u32_e32 v9, vcc_lo, s5, v9, vcc_lo s_clause 0x1 global_load_b32 v12, v[6:7], off global_load_b32 v8, v[8:9], off v_add_co_u32 v6, vcc_lo, s4, v10 v_add_co_ci_u32_e32 v7, vcc_lo, s5, v11, vcc_lo global_load_b32 v6, v[6:7], off s_waitcnt vmcnt(2) v_dual_add_f32 v7, v3, v3 :: v_dual_mul_f32 v2, v2, v12 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2) v_mul_f32_e32 v7, v7, v12 s_waitcnt vmcnt(1) v_mul_f32_e32 v4, v8, v4 v_mul_f32_e32 v7, v8, v7 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmac_f32_e32 v7, v5, v6 v_fma_f32 v2, -v12, v2, v7 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_fma_f32 v2, -v8, v4, v2 v_mul_f32_e32 v4, v3, v6 v_fma_f32 v2, -v3, v4, v2 s_delay_alu instid0(VALU_DEP_1) v_cmp_gt_f32_e32 vcc_lo, 0, v2 s_and_b32 s7, vcc_lo, exec_lo .LBB0_3: s_or_b32 exec_lo, exec_lo, s3 s_delay_alu instid0(SALU_CYCLE_1) s_and_b32 s3, s7, exec_lo .LBB0_4: s_or_b32 exec_lo, exec_lo, s2 s_load_b64 s[0:1], s[0:1], 0x0 v_cndmask_b32_e64 v2, 0, 1, s3 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo 
global_store_b8 v[0:1], v2, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z22PositiveDefiniteKernelPcPfiii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 13 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 1 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 2 .amdhsa_next_free_vgpr 13 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z22PositiveDefiniteKernelPcPfiii, .Lfunc_end0-_Z22PositiveDefiniteKernelPcPfiii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 
.size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z22PositiveDefiniteKernelPcPfiii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z22PositiveDefiniteKernelPcPfiii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 13 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h>
#include "includes.h"

// Per-voxel definiteness test on a 3x3 symmetric Hessian field (HIP port).
//
// hessian is laid out as 6 contiguous planes of imageW*imageH*imageD floats:
// xx, xy, xz, yy, yz, zz (in that order, as indexed below).
// hessian_pd receives 1 where the criterion holds, else 0.
//
// NOTE(review): despite the kernel name, the sign pattern tested below
// (xx < 0, 2nd minor > 0, 3rd minor < 0) is Sylvester's criterion for a
// NEGATIVE definite matrix — confirm the intended semantics with callers.
//
// Launch: 3-D grid of (PD_BLOCKDIM_X, PD_BLOCKDIM_Y, PD_BLOCKDIM_Z) blocks
// (block dims come from includes.h).
__global__ void PositiveDefiniteKernel( char *hessian_pd, float *hessian, int imageW, int imageH, int imageD )
{
    const int baseX = blockIdx.x * PD_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * PD_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = blockIdx.z * PD_BLOCKDIM_Z + threadIdx.z;

    // BUG FIX: the original had no bounds check, so when the image dimensions
    // are not exact multiples of the block dimensions, out-of-range threads
    // read and wrote past the buffers.
    if (baseX >= imageW || baseY >= imageH || baseZ >= imageD)
        return;

    const int size = imageW * imageH * imageD;              // voxels per component plane
    const int idx  = (baseZ * imageH + baseY) * imageW + baseX;

    const float xx = hessian[idx];
    const float xy = hessian[idx + size];
    const float xz = hessian[idx + size * 2];
    const float yy = hessian[idx + size * 3];
    const float yz = hessian[idx + size * 4];
    const float zz = hessian[idx + size * 5];

    // Sylvester's criterion: signs of the leading principal minors.
    const float minor2 = xx * yy - xy * xy;
    const float minor3 = xx * yy * zz + 2 * xy * yz * xz
                       - xx * yz * yz - yy * xz * xz - zz * xy * xy;

    hessian_pd[idx] = (xx < 0 && minor2 > 0 && minor3 < 0) ? 1 : 0;
}
.text .file "PositiveDefiniteKernel.hip" .globl _Z37__device_stub__PositiveDefiniteKernelPcPfiii # -- Begin function _Z37__device_stub__PositiveDefiniteKernelPcPfiii .p2align 4, 0x90 .type _Z37__device_stub__PositiveDefiniteKernelPcPfiii,@function _Z37__device_stub__PositiveDefiniteKernelPcPfiii: # @_Z37__device_stub__PositiveDefiniteKernelPcPfiii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movl %r8d, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 4(%rsp), %rax movq %rax, 112(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z22PositiveDefiniteKernelPcPfiii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z37__device_stub__PositiveDefiniteKernelPcPfiii, .Lfunc_end0-_Z37__device_stub__PositiveDefiniteKernelPcPfiii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z22PositiveDefiniteKernelPcPfiii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: 
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z22PositiveDefiniteKernelPcPfiii,@object # @_Z22PositiveDefiniteKernelPcPfiii .section .rodata,"a",@progbits .globl _Z22PositiveDefiniteKernelPcPfiii .p2align 3, 0x0 _Z22PositiveDefiniteKernelPcPfiii: .quad _Z37__device_stub__PositiveDefiniteKernelPcPfiii .size _Z22PositiveDefiniteKernelPcPfiii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z22PositiveDefiniteKernelPcPfiii" .size .L__unnamed_1, 34 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z37__device_stub__PositiveDefiniteKernelPcPfiii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z22PositiveDefiniteKernelPcPfiii 
.addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z22PositiveDefiniteKernelPcPfiii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */ /* 0x000e220000002600 */ /*0020*/ MOV R6, c[0x0][0x174] ; /* 0x00005d0000067a02 */ /* 0x000fe20000000f00 */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */ /* 0x000e280000002200 */ /*0050*/ S2R R4, SR_CTAID.Z ; /* 0x0000000000047919 */ /* 0x000e680000002700 */ /*0060*/ S2R R7, SR_TID.Z ; /* 0x0000000000077919 */ /* 0x000e680000002300 */ /*0070*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000ea80000002500 */ /*0080*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */ /* 0x000ea20000002100 */ /*0090*/ LEA R2, R2, R3, 0x3 ; /* 0x0000000302027211 */ /* 0x001fc400078e18ff */ /*00a0*/ LEA R3, R4, R7, 0x3 ; /* 0x0000000704037211 */ /* 0x002fe200078e18ff */ /*00b0*/ IMAD R4, R6, c[0x0][0x170], RZ ; /* 0x00005c0006047a24 */ /* 0x000fc800078e02ff */ /*00c0*/ IMAD R3, R3, c[0x0][0x174], R2 ; /* 0x00005d0003037a24 */ /* 0x000fe400078e0202 */ /*00d0*/ IMAD R13, R4, c[0x0][0x178], RZ ; /* 0x00005e00040d7a24 */ /* 0x000fe200078e02ff */ /*00e0*/ LEA R0, R0, R5, 0x3 ; /* 0x0000000500007211 */ /* 0x004fe200078e18ff */ /*00f0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fc800000001ff */ /*0100*/ IMAD R0, R3, c[0x0][0x170], R0 ; /* 0x00005c0003007a24 */ /* 0x000fca00078e0200 */ /*0110*/ IADD3 R2, R13, R0, R13 ; /* 0x000000000d027210 */ /* 0x000fca0007ffe00d */ /*0120*/ IMAD.WIDE R2, R2, R5, c[0x0][0x168] ; /* 0x00005a0002027625 */ /* 0x000fc800078e0205 */ /*0130*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */ /* 0x000fc800078e0205 */ /*0140*/ IMAD.WIDE R8, R13.reuse, 0x4, R2 ; /* 0x000000040d087825 */ /* 0x040fe200078e0202 */ /*0150*/ 
LDG.E R14, [R4.64] ; /* 0x00000004040e7981 */ /* 0x000ea6000c1e1900 */ /*0160*/ IMAD.WIDE R6, R13, 0x4, R4 ; /* 0x000000040d067825 */ /* 0x000fe200078e0204 */ /*0170*/ LDG.E R15, [R8.64] ; /* 0x00000004080f7981 */ /* 0x000eaa000c1e1900 */ /*0180*/ LDG.E R7, [R6.64] ; /* 0x0000000406077981 */ /* 0x000ee2000c1e1900 */ /*0190*/ FMUL R16, R14, R15 ; /* 0x0000000f0e107220 */ /* 0x004fc80000400000 */ /*01a0*/ FFMA R10, -R7, R7, R16 ; /* 0x00000007070a7223 */ /* 0x008fca0000000110 */ /*01b0*/ FSETP.GT.AND P0, PT, R10, RZ, PT ; /* 0x000000ff0a00720b */ /* 0x000fc80003f04000 */ /*01c0*/ FSETP.GEU.OR P0, PT, R14, RZ, !P0 ; /* 0x000000ff0e00720b */ /* 0x000fda000470e400 */ /*01d0*/ @!P0 IMAD.WIDE R10, R13.reuse, 0x4, R8 ; /* 0x000000040d0a8825 */ /* 0x040fe200078e0208 */ /*01e0*/ @!P0 LDG.E R2, [R2.64] ; /* 0x0000000402028981 */ /* 0x000ea8000c1e1900 */ /*01f0*/ @!P0 LDG.E R17, [R10.64] ; /* 0x000000040a118981 */ /* 0x000ee2000c1e1900 */ /*0200*/ @!P0 IMAD.WIDE R12, R13, 0x4, R10 ; /* 0x000000040d0c8825 */ /* 0x000fcc00078e020a */ /*0210*/ @!P0 LDG.E R12, [R12.64] ; /* 0x000000040c0c8981 */ /* 0x000f22000c1e1900 */ /*0220*/ @!P0 FADD R4, R7, R7 ; /* 0x0000000707048221 */ /* 0x000fc80000000000 */ /*0230*/ @!P0 FMUL R5, R17, R4 ; /* 0x0000000411058220 */ /* 0x008fc80000400000 */ /*0240*/ @!P0 FMUL R5, R2, R5 ; /* 0x0000000502058220 */ /* 0x004fe40000400000 */ /*0250*/ @!P0 FMUL R14, R14, R17 ; /* 0x000000110e0e8220 */ /* 0x000fe40000400000 */ /*0260*/ @!P0 FFMA R16, R16, R12, R5 ; /* 0x0000000c10108223 */ /* 0x010fe40000000005 */ /*0270*/ @!P0 FMUL R15, R15, R2 ; /* 0x000000020f0f8220 */ /* 0x000fe40000400000 */ /*0280*/ @!P0 FFMA R17, -R17, R14, R16 ; /* 0x0000000e11118223 */ /* 0x000fe40000000110 */ /*0290*/ @!P0 FMUL R5, R7, R12 ; /* 0x0000000c07058220 */ /* 0x000fc40000400000 */ /*02a0*/ @!P0 FFMA R4, -R2, R15, R17 ; /* 0x0000000f02048223 */ /* 0x000fc80000000111 */ /*02b0*/ @!P0 FFMA R4, -R7, R5, R4 ; /* 0x0000000507048223 */ /* 0x000fe20000000104 */ /*02c0*/ IADD3 R2, P2, 
R0, c[0x0][0x160], RZ ; /* 0x0000580000027a10 */ /* 0x000fc80007f5e0ff */ /*02d0*/ @!P0 FSETP.GEU.AND P1, PT, R4, RZ, PT ; /* 0x000000ff0400820b */ /* 0x000fe40003f2e000 */ /*02e0*/ PRMT R5, RZ, 0x7610, R5 ; /* 0x00007610ff057816 */ /* 0x000fe40000000005 */ /*02f0*/ LEA.HI.X.SX32 R3, R0, c[0x0][0x164], 0x1, P2 ; /* 0x0000590000037a11 */ /* 0x000fe400010f0eff */ /*0300*/ @!P0 SEL R5, RZ, 0x1, P1 ; /* 0x00000001ff058807 */ /* 0x000fca0000800000 */ /*0310*/ STG.E.U8 [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101104 */ /*0320*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0330*/ BRA 0x330; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0380*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0390*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z22PositiveDefiniteKernelPcPfiii .globl _Z22PositiveDefiniteKernelPcPfiii .p2align 8 .type _Z22PositiveDefiniteKernelPcPfiii,@function _Z22PositiveDefiniteKernelPcPfiii: s_load_b128 s[4:7], s[0:1], 0x8 v_bfe_u32 v1, v0, 20, 10 v_bfe_u32 v2, v0, 10, 10 s_lshl_b32 s2, s14, 3 v_and_b32_e32 v0, 0x3ff, v0 s_mov_b32 s3, 0 v_lshl_add_u32 v1, s15, 3, v1 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v1, v1, s7 v_add3_u32 v1, s2, v2, v1 s_lshl_b32 s2, s13, 3 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v1, v1, s6 v_add3_u32 v0, s2, v0, v1 s_mov_b32 s2, exec_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_ashrrev_i32_e32 v1, 31, v0 v_lshlrev_b64 v[2:3], 2, v[0:1] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v2, vcc_lo, s4, v2 v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo global_load_b32 v2, v[2:3], off s_waitcnt vmcnt(0) v_cmpx_gt_f32_e32 0, v2 s_cbranch_execz .LBB0_4 s_load_b32 s3, s[0:1], 0x18 s_mul_i32 s6, s7, s6 s_mov_b32 s7, 0 s_waitcnt lgkmcnt(0) s_mul_i32 s6, s6, s3 s_mov_b32 s3, exec_lo v_mad_u64_u32 v[3:4], null, s6, 3, v[0:1] v_add_nc_u32_e32 v5, s6, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_ashrrev_i32_e32 v6, 31, v5 v_ashrrev_i32_e32 v4, 31, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_lshlrev_b64 v[5:6], 2, v[5:6] v_lshlrev_b64 v[3:4], 2, v[3:4] s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v3, vcc_lo, s4, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo s_delay_alu instid0(VALU_DEP_4) v_add_co_u32 v5, vcc_lo, s4, v5 v_add_co_ci_u32_e32 v6, vcc_lo, s5, v6, vcc_lo s_clause 0x1 global_load_b32 v4, v[3:4], off global_load_b32 v3, v[5:6], off s_waitcnt vmcnt(1) v_mul_f32_e32 v5, v2, v4 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) | 
instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f32 v6, -v3, v3, v5 v_cmpx_lt_f32_e32 0, v6 s_cbranch_execz .LBB0_3 v_lshl_add_u32 v6, s6, 2, v0 v_lshl_add_u32 v8, s6, 1, v0 v_mad_u64_u32 v[10:11], null, s6, 5, v[0:1] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_ashrrev_i32_e32 v7, 31, v6 v_ashrrev_i32_e32 v9, 31, v8 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_ashrrev_i32_e32 v11, 31, v10 v_lshlrev_b64 v[6:7], 2, v[6:7] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_lshlrev_b64 v[8:9], 2, v[8:9] v_lshlrev_b64 v[10:11], 2, v[10:11] s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v6, vcc_lo, s4, v6 v_add_co_ci_u32_e32 v7, vcc_lo, s5, v7, vcc_lo s_delay_alu instid0(VALU_DEP_4) v_add_co_u32 v8, vcc_lo, s4, v8 v_add_co_ci_u32_e32 v9, vcc_lo, s5, v9, vcc_lo s_clause 0x1 global_load_b32 v12, v[6:7], off global_load_b32 v8, v[8:9], off v_add_co_u32 v6, vcc_lo, s4, v10 v_add_co_ci_u32_e32 v7, vcc_lo, s5, v11, vcc_lo global_load_b32 v6, v[6:7], off s_waitcnt vmcnt(2) v_dual_add_f32 v7, v3, v3 :: v_dual_mul_f32 v2, v2, v12 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2) v_mul_f32_e32 v7, v7, v12 s_waitcnt vmcnt(1) v_mul_f32_e32 v4, v8, v4 v_mul_f32_e32 v7, v8, v7 s_waitcnt vmcnt(0) s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmac_f32_e32 v7, v5, v6 v_fma_f32 v2, -v12, v2, v7 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_fma_f32 v2, -v8, v4, v2 v_mul_f32_e32 v4, v3, v6 v_fma_f32 v2, -v3, v4, v2 s_delay_alu instid0(VALU_DEP_1) v_cmp_gt_f32_e32 vcc_lo, 0, v2 s_and_b32 s7, vcc_lo, exec_lo .LBB0_3: s_or_b32 exec_lo, exec_lo, s3 s_delay_alu instid0(SALU_CYCLE_1) s_and_b32 s3, s7, exec_lo .LBB0_4: s_or_b32 exec_lo, exec_lo, s2 s_load_b64 s[0:1], s[0:1], 0x0 v_cndmask_b32_e64 v2, 0, 1, s3 s_waitcnt lgkmcnt(0) v_add_co_u32 v0, vcc_lo, s0, v0 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo 
global_store_b8 v[0:1], v2, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z22PositiveDefiniteKernelPcPfiii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 28 .amdhsa_user_sgpr_count 13 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 1 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 2 .amdhsa_next_free_vgpr 13 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z22PositiveDefiniteKernelPcPfiii, .Lfunc_end0-_Z22PositiveDefiniteKernelPcPfiii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 
.size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 20 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 28 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z22PositiveDefiniteKernelPcPfiii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z22PositiveDefiniteKernelPcPfiii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 13 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_00076eab_00000000-6_PositiveDefiniteKernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z47__device_stub__Z22PositiveDefiniteKernelPcPfiiiPcPfiii .type _Z47__device_stub__Z22PositiveDefiniteKernelPcPfiiiPcPfiii, @function _Z47__device_stub__Z22PositiveDefiniteKernelPcPfiiiPcPfiii: .LFB2051: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movl %r8d, 4(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) leaq 4(%rsp), %rax movq %rax, 128(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 136(%rsp), %rax subq %fs:40, %rax jne .L8 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z22PositiveDefiniteKernelPcPfiii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z47__device_stub__Z22PositiveDefiniteKernelPcPfiiiPcPfiii, .-_Z47__device_stub__Z22PositiveDefiniteKernelPcPfiiiPcPfiii .globl _Z22PositiveDefiniteKernelPcPfiii 
.type _Z22PositiveDefiniteKernelPcPfiii, @function _Z22PositiveDefiniteKernelPcPfiii: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z47__device_stub__Z22PositiveDefiniteKernelPcPfiiiPcPfiii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z22PositiveDefiniteKernelPcPfiii, .-_Z22PositiveDefiniteKernelPcPfiii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z22PositiveDefiniteKernelPcPfiii" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z22PositiveDefiniteKernelPcPfiii(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "PositiveDefiniteKernel.hip" .globl _Z37__device_stub__PositiveDefiniteKernelPcPfiii # -- Begin function _Z37__device_stub__PositiveDefiniteKernelPcPfiii .p2align 4, 0x90 .type _Z37__device_stub__PositiveDefiniteKernelPcPfiii,@function _Z37__device_stub__PositiveDefiniteKernelPcPfiii: # @_Z37__device_stub__PositiveDefiniteKernelPcPfiii .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) movl %ecx, 8(%rsp) movl %r8d, 4(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 8(%rsp), %rax movq %rax, 104(%rsp) leaq 4(%rsp), %rax movq %rax, 112(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z22PositiveDefiniteKernelPcPfiii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end0: .size _Z37__device_stub__PositiveDefiniteKernelPcPfiii, .Lfunc_end0-_Z37__device_stub__PositiveDefiniteKernelPcPfiii .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z22PositiveDefiniteKernelPcPfiii, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: 
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z22PositiveDefiniteKernelPcPfiii,@object # @_Z22PositiveDefiniteKernelPcPfiii .section .rodata,"a",@progbits .globl _Z22PositiveDefiniteKernelPcPfiii .p2align 3, 0x0 _Z22PositiveDefiniteKernelPcPfiii: .quad _Z37__device_stub__PositiveDefiniteKernelPcPfiii .size _Z22PositiveDefiniteKernelPcPfiii, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z22PositiveDefiniteKernelPcPfiii" .size .L__unnamed_1, 34 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z37__device_stub__PositiveDefiniteKernelPcPfiii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z22PositiveDefiniteKernelPcPfiii 
.addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h> #include <stdlib.h> #define MAX_ITER 100 #define MAX 100 //maximum value of the matrix element #define TOL 0.000001 // Generate a random float number with the maximum value of max float rand_float(int max){ return ((float)rand()/(float)(RAND_MAX)) * max; } // Allocate 2D matrix void allocate_init_2Dmatrix(float ***mat, int n, int m){ int i, j; *mat = (float **) malloc(n * sizeof(float *)); for(i = 0; i < n; i++) { (*mat)[i] = (float *)malloc(m * sizeof(float)); for (j = 0; j < m; j++) (*mat)[i][j] = rand_float(MAX); } } // solver void solver(float ***mat, int n, int m){ float diff = 0, temp; int done = 0, cnt_iter = 0, i, j; while (!done && (cnt_iter < MAX_ITER)){ diff = 0; for (i = 1; i < n - 1; i++) for (j = 1; j < m - 1; j++){ temp = (*mat)[i][j]; (*mat)[i][j] = 0.2 * ((*mat)[i][j] + (*mat)[i][j - 1] + (*mat)[i - 1][j] + (*mat)[i][j + 1] + (*mat)[i + 1][j]); diff += abs((*mat)[i][j] - temp); } if (diff/n/n < TOL) done = 1; cnt_iter ++; } if (done) printf("Solver converged after %d iterations\n", cnt_iter); else printf("Solver not converged after %d iterations\n", cnt_iter); } int main(int argc, char *argv[]) { int n, communication; float **a; if (argc < 2) { printf("Call this program with two parameters: matrix_size communication \n"); printf("\t matrix_size: Add 2 to a power of 2 (e.g. : 18, 1026)\n"); exit(1); } n = atoi(argv[1]); printf("Matrix size = %d\n", n); allocate_init_2Dmatrix(&a, n, n); solver(&a, n, n); return 0; }
code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <stdlib.h> #define MAX_ITER 100 #define MAX 100 //maximum value of the matrix element #define TOL 0.000001 // Generate a random float number with the maximum value of max float rand_float(int max){ return ((float)rand()/(float)(RAND_MAX)) * max; } // Allocate 2D matrix void allocate_init_2Dmatrix(float ***mat, int n, int m){ int i, j; *mat = (float **) malloc(n * sizeof(float *)); for(i = 0; i < n; i++) { (*mat)[i] = (float *)malloc(m * sizeof(float)); for (j = 0; j < m; j++) (*mat)[i][j] = rand_float(MAX); } } // solver void solver(float ***mat, int n, int m){ float diff = 0, temp; int done = 0, cnt_iter = 0, i, j; while (!done && (cnt_iter < MAX_ITER)){ diff = 0; for (i = 1; i < n - 1; i++) for (j = 1; j < m - 1; j++){ temp = (*mat)[i][j]; (*mat)[i][j] = 0.2 * ((*mat)[i][j] + (*mat)[i][j - 1] + (*mat)[i - 1][j] + (*mat)[i][j + 1] + (*mat)[i + 1][j]); diff += abs((*mat)[i][j] - temp); } if (diff/n/n < TOL) done = 1; cnt_iter ++; } if (done) printf("Solver converged after %d iterations\n", cnt_iter); else printf("Solver not converged after %d iterations\n", cnt_iter); } int main(int argc, char *argv[]) { int n, communication; float **a; if (argc < 2) { printf("Call this program with two parameters: matrix_size communication \n"); printf("\t matrix_size: Add 2 to a power of 2 (e.g. : 18, 1026)\n"); exit(1); } n = atoi(argv[1]); printf("Matrix size = %d\n", n); allocate_init_2Dmatrix(&a, n, n); solver(&a, n, n); return 0; }
.file "tmpxft_0016a701_00000000-6_gs_seq.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2063: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z10rand_floati .type _Z10rand_floati, @function _Z10rand_floati: .LFB2057: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 movl %edi, %ebx call rand@PLT pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 mulss .LC0(%rip), %xmm0 pxor %xmm1, %xmm1 cvtsi2ssl %ebx, %xmm1 mulss %xmm1, %xmm0 popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size _Z10rand_floati, .-_Z10rand_floati .globl _Z22allocate_init_2DmatrixPPPfii .type _Z22allocate_init_2DmatrixPPPfii, @function _Z22allocate_init_2DmatrixPPPfii: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $24, %rsp .cfi_def_cfa_offset 80 movq %rdi, %r13 movl %esi, %ebx movl %edx, %r15d movslq %esi, %rax salq $3, %rax movq %rax, 8(%rsp) movq %rax, %rdi call malloc@PLT movq %rax, 0(%r13) testl %ebx, %ebx jle .L5 movslq %r15d, %r14 salq $2, %r14 movl $0, %r12d .L9: movq %r12, %rbx addq 0(%r13), %rbx movq %r14, %rdi call malloc@PLT movq %rax, (%rbx) testl %r15d, %r15d jle .L7 movl $0, %ebx .L8: movq 0(%r13), %rax movq %rbx, %rbp addq (%rax,%r12), %rbp movl $100, %edi call _Z10rand_floati movss %xmm0, 0(%rbp) addq $4, %rbx cmpq %rbx, %r14 jne .L8 .L7: addq $8, %r12 cmpq %r12, 8(%rsp) jne .L9 .L5: addq $24, %rsp .cfi_def_cfa_offset 56 popq %rbx 
.cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size _Z22allocate_init_2DmatrixPPPfii, .-_Z22allocate_init_2DmatrixPPPfii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC5: .string "Solver converged after %d iterations\n" .align 8 .LC6: .string "Solver not converged after %d iterations\n" .text .globl _Z6solverPPPfii .type _Z6solverPPPfii, @function _Z6solverPPPfii: .LFB2059: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 movl %esi, %r8d movl %edx, %ecx leal -3(%rsi), %eax leaq 16(,%rax,8), %r11 leal -3(%rdx), %eax leaq 8(,%rax,4), %r10 movl $0, %edx movsd .LC2(%rip), %xmm4 movss .LC3(%rip), %xmm3 movsd .LC4(%rip), %xmm5 movl %ecx, %r9d jmp .L14 .L15: movq (%rdi), %rbp movq 0(%rbp,%rbx), %r14 leaq (%r14,%rax), %rsi movss (%rsi), %xmm1 movaps %xmm1, %xmm0 addss -4(%r14,%rax), %xmm0 movq 0(%rbp,%r13), %rcx addss (%rcx,%rax), %xmm0 movq %rax, %rcx addq $4, %rax movq 0(%rbp,%r12), %rbp addss (%r14,%rax), %xmm0 addss 0(%rbp,%rcx), %xmm0 cvtss2sd %xmm0, %xmm0 mulsd %xmm4, %xmm0 cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%rsi) movq (%rdi), %rsi movq (%rsi,%rbx), %rsi movss (%rsi,%rcx), %xmm0 subss %xmm1, %xmm0 andps %xmm3, %xmm0 addss %xmm0, %xmm2 cmpq %r10, %rax jne .L15 .L18: addq $8, %rbx cmpq %rbx, %r11 je .L16 .L21: leaq -8(%rbx), %r13 leaq 8(%rbx), %r12 movl $4, %eax cmpl $2, %r9d jg .L15 jmp .L18 .L16: pxor %xmm0, %xmm0 cvtsi2ssl %r8d, %xmm0 divss %xmm0, %xmm2 divss %xmm0, %xmm2 pxor %xmm0, %xmm0 cvtss2sd %xmm2, %xmm0 comisd %xmm0, %xmm5 ja .L19 addl $1, %edx cmpl $100, %edx je .L20 .L14: movl $8, %ebx pxor %xmm2, %xmm2 cmpl $2, %r8d jg .L21 jmp .L16 .L19: addl 
$1, %edx leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L13: popq %rbx .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L20: .cfi_restore_state movl $100, %edx leaq .LC6(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L13 .cfi_endproc .LFE2059: .size _Z6solverPPPfii, .-_Z6solverPPPfii .section .rodata.str1.8 .align 8 .LC7: .string "Call this program with two parameters: matrix_size communication \n" .align 8 .LC8: .string "\t matrix_size: Add 2 to a power of 2 (e.g. : 18, 1026)\n" .section .rodata.str1.1,"aMS",@progbits,1 .LC9: .string "Matrix size = %d\n" .text .globl main .type main, @function main: .LFB2060: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $24, %rsp .cfi_def_cfa_offset 48 movq %fs:40, %rax movq %rax, 8(%rsp) xorl %eax, %eax cmpl $1, %edi jle .L35 movq 8(%rsi), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movl %eax, %ebx movl %eax, %edx leaq .LC9(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %rsp, %rbp movl %ebx, %edx movl %ebx, %esi movq %rbp, %rdi call _Z22allocate_init_2DmatrixPPPfii movl %ebx, %edx movl %ebx, %esi movq %rbp, %rdi call _Z6solverPPPfii movq 8(%rsp), %rax subq %fs:40, %rax jne .L36 movl $0, %eax addq $24, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L35: .cfi_restore_state leaq .LC7(%rip), %rsi movl $2, %edi call __printf_chk@PLT leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L36: call __stack_chk_fail@PLT .cfi_endproc .LFE2060: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq 
_ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 805306368 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC2: .long -1717986918 .long 1070176665 .section .rodata.cst16,"aM",@progbits,16 .align 16 .LC3: .long 2147483647 .long 0 .long 0 .long 0 .section .rodata.cst8 .align 8 .LC4: .long -1598689907 .long 1051772663 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <stdlib.h> #define MAX_ITER 100 #define MAX 100 //maximum value of the matrix element #define TOL 0.000001 // Generate a random float number with the maximum value of max float rand_float(int max){ return ((float)rand()/(float)(RAND_MAX)) * max; } // Allocate 2D matrix void allocate_init_2Dmatrix(float ***mat, int n, int m){ int i, j; *mat = (float **) malloc(n * sizeof(float *)); for(i = 0; i < n; i++) { (*mat)[i] = (float *)malloc(m * sizeof(float)); for (j = 0; j < m; j++) (*mat)[i][j] = rand_float(MAX); } } // solver void solver(float ***mat, int n, int m){ float diff = 0, temp; int done = 0, cnt_iter = 0, i, j; while (!done && (cnt_iter < MAX_ITER)){ diff = 0; for (i = 1; i < n - 1; i++) for (j = 1; j < m - 1; j++){ temp = (*mat)[i][j]; (*mat)[i][j] = 0.2 * ((*mat)[i][j] + (*mat)[i][j - 1] + (*mat)[i - 1][j] + (*mat)[i][j + 1] + (*mat)[i + 1][j]); diff += abs((*mat)[i][j] - temp); } if (diff/n/n < TOL) done = 1; cnt_iter ++; } if (done) printf("Solver converged after %d iterations\n", cnt_iter); else printf("Solver not converged after %d iterations\n", cnt_iter); } int main(int argc, char *argv[]) { int n, communication; float **a; if (argc < 2) { printf("Call this program with two parameters: matrix_size communication \n"); printf("\t matrix_size: Add 2 to a power of 2 (e.g. : 18, 1026)\n"); exit(1); } n = atoi(argv[1]); printf("Matrix size = %d\n", n); allocate_init_2Dmatrix(&a, n, n); solver(&a, n, n); return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #define MAX_ITER 100 #define MAX 100 //maximum value of the matrix element #define TOL 0.000001 // Generate a random float number with the maximum value of max float rand_float(int max){ return ((float)rand()/(float)(RAND_MAX)) * max; } // Allocate 2D matrix void allocate_init_2Dmatrix(float ***mat, int n, int m){ int i, j; *mat = (float **) malloc(n * sizeof(float *)); for(i = 0; i < n; i++) { (*mat)[i] = (float *)malloc(m * sizeof(float)); for (j = 0; j < m; j++) (*mat)[i][j] = rand_float(MAX); } } // solver void solver(float ***mat, int n, int m){ float diff = 0, temp; int done = 0, cnt_iter = 0, i, j; while (!done && (cnt_iter < MAX_ITER)){ diff = 0; for (i = 1; i < n - 1; i++) for (j = 1; j < m - 1; j++){ temp = (*mat)[i][j]; (*mat)[i][j] = 0.2 * ((*mat)[i][j] + (*mat)[i][j - 1] + (*mat)[i - 1][j] + (*mat)[i][j + 1] + (*mat)[i + 1][j]); diff += abs((*mat)[i][j] - temp); } if (diff/n/n < TOL) done = 1; cnt_iter ++; } if (done) printf("Solver converged after %d iterations\n", cnt_iter); else printf("Solver not converged after %d iterations\n", cnt_iter); } int main(int argc, char *argv[]) { int n, communication; float **a; if (argc < 2) { printf("Call this program with two parameters: matrix_size communication \n"); printf("\t matrix_size: Add 2 to a power of 2 (e.g. : 18, 1026)\n"); exit(1); } n = atoi(argv[1]); printf("Matrix size = %d\n", n); allocate_init_2Dmatrix(&a, n, n); solver(&a, n, n); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #define MAX_ITER 100 #define MAX 100 //maximum value of the matrix element #define TOL 0.000001 // Generate a random float number with the maximum value of max float rand_float(int max){ return ((float)rand()/(float)(RAND_MAX)) * max; } // Allocate 2D matrix void allocate_init_2Dmatrix(float ***mat, int n, int m){ int i, j; *mat = (float **) malloc(n * sizeof(float *)); for(i = 0; i < n; i++) { (*mat)[i] = (float *)malloc(m * sizeof(float)); for (j = 0; j < m; j++) (*mat)[i][j] = rand_float(MAX); } } // solver void solver(float ***mat, int n, int m){ float diff = 0, temp; int done = 0, cnt_iter = 0, i, j; while (!done && (cnt_iter < MAX_ITER)){ diff = 0; for (i = 1; i < n - 1; i++) for (j = 1; j < m - 1; j++){ temp = (*mat)[i][j]; (*mat)[i][j] = 0.2 * ((*mat)[i][j] + (*mat)[i][j - 1] + (*mat)[i - 1][j] + (*mat)[i][j + 1] + (*mat)[i + 1][j]); diff += abs((*mat)[i][j] - temp); } if (diff/n/n < TOL) done = 1; cnt_iter ++; } if (done) printf("Solver converged after %d iterations\n", cnt_iter); else printf("Solver not converged after %d iterations\n", cnt_iter); } int main(int argc, char *argv[]) { int n, communication; float **a; if (argc < 2) { printf("Call this program with two parameters: matrix_size communication \n"); printf("\t matrix_size: Add 2 to a power of 2 (e.g. : 18, 1026)\n"); exit(1); } n = atoi(argv[1]); printf("Matrix size = %d\n", n); allocate_init_2Dmatrix(&a, n, n); solver(&a, n, n); return 0; }
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #define MAX_ITER 100 #define MAX 100 //maximum value of the matrix element #define TOL 0.000001 // Generate a random float number with the maximum value of max float rand_float(int max){ return ((float)rand()/(float)(RAND_MAX)) * max; } // Allocate 2D matrix void allocate_init_2Dmatrix(float ***mat, int n, int m){ int i, j; *mat = (float **) malloc(n * sizeof(float *)); for(i = 0; i < n; i++) { (*mat)[i] = (float *)malloc(m * sizeof(float)); for (j = 0; j < m; j++) (*mat)[i][j] = rand_float(MAX); } } // solver void solver(float ***mat, int n, int m){ float diff = 0, temp; int done = 0, cnt_iter = 0, i, j; while (!done && (cnt_iter < MAX_ITER)){ diff = 0; for (i = 1; i < n - 1; i++) for (j = 1; j < m - 1; j++){ temp = (*mat)[i][j]; (*mat)[i][j] = 0.2 * ((*mat)[i][j] + (*mat)[i][j - 1] + (*mat)[i - 1][j] + (*mat)[i][j + 1] + (*mat)[i + 1][j]); diff += abs((*mat)[i][j] - temp); } if (diff/n/n < TOL) done = 1; cnt_iter ++; } if (done) printf("Solver converged after %d iterations\n", cnt_iter); else printf("Solver not converged after %d iterations\n", cnt_iter); } int main(int argc, char *argv[]) { int n, communication; float **a; if (argc < 2) { printf("Call this program with two parameters: matrix_size communication \n"); printf("\t matrix_size: Add 2 to a power of 2 (e.g. : 18, 1026)\n"); exit(1); } n = atoi(argv[1]); printf("Matrix size = %d\n", n); allocate_init_2Dmatrix(&a, n, n); solver(&a, n, n); return 0; }
.text .file "gs_seq.hip" .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function _Z10rand_floati .LCPI0_0: .long 0x30000000 # float 4.65661287E-10 .text .globl _Z10rand_floati .p2align 4, 0x90 .type _Z10rand_floati,@function _Z10rand_floati: # @_Z10rand_floati .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset %rbx, -16 movl %edi, %ebx callq rand cvtsi2ss %eax, %xmm1 cvtsi2ss %ebx, %xmm0 mulss .LCPI0_0(%rip), %xmm1 mulss %xmm1, %xmm0 popq %rbx .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _Z10rand_floati, .Lfunc_end0-_Z10rand_floati .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function _Z22allocate_init_2DmatrixPPPfii .LCPI1_0: .long 0x30000000 # float 4.65661287E-10 .LCPI1_1: .long 0x42c80000 # float 100 .text .globl _Z22allocate_init_2DmatrixPPPfii .p2align 4, 0x90 .type _Z22allocate_init_2DmatrixPPPfii,@function _Z22allocate_init_2DmatrixPPPfii: # @_Z22allocate_init_2DmatrixPPPfii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 pushq %rax .cfi_def_cfa_offset 64 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %edx, 4(%rsp) # 4-byte Spill movl %esi, %ebp movq %rdi, %r14 movslq %esi, %rbx leaq (,%rbx,8), %rdi callq malloc movq %rax, (%r14) testl %ebx, %ebx jle .LBB1_6 # %bb.1: # %.lr.ph17 movslq 4(%rsp), %rax # 4-byte Folded Reload leaq (,%rax,4), %r15 movl %ebp, %r12d movl %eax, %r13d xorl %ebp, %ebp jmp .LBB1_2 .p2align 4, 0x90 .LBB1_5: # %._crit_edge # in Loop: Header=BB1_2 Depth=1 incq %rbp cmpq %r12, %rbp je .LBB1_6 .LBB1_2: # =>This Loop Header: Depth=1 # Child Loop BB1_4 Depth 2 movq %r15, %rdi callq malloc movq (%r14), %rcx movq %rax, (%rcx,%rbp,8) cmpl $0, 4(%rsp) # 4-byte Folded Reload jle .LBB1_5 # 
%bb.3: # %.lr.ph.preheader # in Loop: Header=BB1_2 Depth=1 xorl %ebx, %ebx .p2align 4, 0x90 .LBB1_4: # %.lr.ph # Parent Loop BB1_2 Depth=1 # => This Inner Loop Header: Depth=2 callq rand movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 mulss %xmm1, %xmm0 movss .LCPI1_1(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss %xmm1, %xmm0 movq (%r14), %rax movq (%rax,%rbp,8), %rax movss %xmm0, (%rax,%rbx,4) incq %rbx cmpq %rbx, %r13 jne .LBB1_4 jmp .LBB1_5 .LBB1_6: # %._crit_edge18 addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z22allocate_init_2DmatrixPPPfii, .Lfunc_end1-_Z22allocate_init_2DmatrixPPPfii .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z6solverPPPfii .LCPI2_0: .quad 0x3fc999999999999a # double 0.20000000000000001 .LCPI2_2: .quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7 .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 .LCPI2_1: .long 0x7fffffff # float NaN .long 0x7fffffff # float NaN .long 0x7fffffff # float NaN .long 0x7fffffff # float NaN .text .globl _Z6solverPPPfii .p2align 4, 0x90 .type _Z6solverPPPfii,@function _Z6solverPPPfii: # @_Z6solverPPPfii .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 # kill: def $edx killed $edx def $rdx # kill: def $esi killed $esi def $rsi cvtsi2ss %esi, %xmm0 leal -1(%rsi), %ecx leal -1(%rdx), %r8d xorl %r9d, %r9d movsd .LCPI2_0(%rip), %xmm1 # xmm1 = mem[0],zero movaps .LCPI2_1(%rip), %xmm2 # xmm2 = [NaN,NaN,NaN,NaN] movsd .LCPI2_2(%rip), 
%xmm3 # xmm3 = mem[0],zero movl $1, %r10d xorl %r11d, %r11d .p2align 4, 0x90 .LBB2_1: # %.preheader45 # =>This Loop Header: Depth=1 # Child Loop BB2_3 Depth 2 # Child Loop BB2_5 Depth 3 xorps %xmm4, %xmm4 cmpl $3, %esi jl .LBB2_7 # %bb.2: # %.preheader.preheader # in Loop: Header=BB2_1 Depth=1 movl $1, %eax jmp .LBB2_3 .p2align 4, 0x90 .LBB2_6: # %._crit_edge # in Loop: Header=BB2_3 Depth=2 incq %rax cmpq %rcx, %rax je .LBB2_7 .LBB2_3: # %.preheader # Parent Loop BB2_1 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB2_5 Depth 3 cmpl $3, %edx jl .LBB2_6 # %bb.4: # %.lr.ph # in Loop: Header=BB2_3 Depth=2 movq (%rdi), %r15 movq -8(%r15,%rax,8), %rbx movq (%r15,%rax,8), %r14 movq 8(%r15,%rax,8), %r15 movss (%r14), %xmm5 # xmm5 = mem[0],zero,zero,zero movl $1, %r12d .p2align 4, 0x90 .LBB2_5: # Parent Loop BB2_1 Depth=1 # Parent Loop BB2_3 Depth=2 # => This Inner Loop Header: Depth=3 movss (%r14,%r12,4), %xmm6 # xmm6 = mem[0],zero,zero,zero addss %xmm6, %xmm5 addss (%rbx,%r12,4), %xmm5 addss 4(%r14,%r12,4), %xmm5 addss (%r15,%r12,4), %xmm5 cvtss2sd %xmm5, %xmm5 mulsd %xmm1, %xmm5 cvtsd2ss %xmm5, %xmm5 movss %xmm5, (%r14,%r12,4) leaq 1(%r12), %r13 movaps %xmm5, %xmm7 subss %xmm6, %xmm7 andps %xmm2, %xmm7 addss %xmm7, %xmm4 movq %r13, %r12 cmpq %r13, %r8 jne .LBB2_5 jmp .LBB2_6 .p2align 4, 0x90 .LBB2_7: # %._crit_edge50 # in Loop: Header=BB2_1 Depth=1 divss %xmm0, %xmm4 divss %xmm0, %xmm4 cvtss2sd %xmm4, %xmm4 ucomisd %xmm4, %xmm3 cmoval %r10d, %r11d leal 1(%r9), %eax testl %r11d, %r11d jne .LBB2_9 # %bb.8: # %._crit_edge50 # in Loop: Header=BB2_1 Depth=1 cmpl $99, %r9d movl %eax, %r9d jb .LBB2_1 .LBB2_9: testl %r11d, %r11d movl $.L.str.1, %ecx movl $.L.str, %edi cmoveq %rcx, %rdi movl %eax, %esi xorl %eax, %eax popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 jmp printf # TAILCALL .Lfunc_end2: .size _Z6solverPPPfii, .Lfunc_end2-_Z6solverPPPfii 
.cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI3_0: .long 0x30000000 # float 4.65661287E-10 .LCPI3_1: .long 0x42c80000 # float 100 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 pushq %rax .cfi_def_cfa_offset 64 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 cmpl $1, %edi jle .LBB3_7 # %bb.1: movq 8(%rsi), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %rbx movl $.L.str.4, %edi movl %ebx, %esi xorl %eax, %eax callq printf movq %rbx, %r14 shlq $32, %r14 movq %r14, %rdi sarq $29, %rdi callq malloc movq %rax, (%rsp) testl %ebx, %ebx jle .LBB3_6 # %bb.2: # %.lr.ph17.i movq %rax, %r15 sarq $30, %r14 movl %ebx, %r12d xorl %r13d, %r13d .p2align 4, 0x90 .LBB3_3: # =>This Loop Header: Depth=1 # Child Loop BB3_4 Depth 2 movq %r14, %rdi callq malloc movq %rax, (%r15,%r13,8) xorl %ebp, %ebp .p2align 4, 0x90 .LBB3_4: # %.lr.ph.i # Parent Loop BB3_3 Depth=1 # => This Inner Loop Header: Depth=2 callq rand xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 mulss .LCPI3_0(%rip), %xmm0 mulss .LCPI3_1(%rip), %xmm0 movq (%r15,%r13,8), %rax movss %xmm0, (%rax,%rbp,4) incq %rbp cmpq %rbp, %r12 jne .LBB3_4 # %bb.5: # %._crit_edge.i # in Loop: Header=BB3_3 Depth=1 incq %r13 cmpq %r12, %r13 jne .LBB3_3 .LBB3_6: # %_Z22allocate_init_2DmatrixPPPfii.exit movq %rsp, %rdi movl %ebx, %esi movl %ebx, %edx callq _Z6solverPPPfii xorl %eax, %eax addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB3_7: 
.cfi_def_cfa_offset 64 movl $.Lstr, %edi callq puts@PLT movl $.Lstr.1, %edi callq puts@PLT movl $1, %edi callq exit .Lfunc_end3: .size main, .Lfunc_end3-main .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Solver converged after %d iterations\n" .size .L.str, 38 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Solver not converged after %d iterations\n" .size .L.str.1, 42 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "Matrix size = %d\n" .size .L.str.4, 18 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Call this program with two parameters: matrix_size communication " .size .Lstr, 66 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "\t matrix_size: Add 2 to a power of 2 (e.g. : 18, 1026)" .size .Lstr.1, 55 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
.text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .amdgpu_metadata --- amdhsa.kernels: [] amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_0016a701_00000000-6_gs_seq.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2063: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2063: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z10rand_floati .type _Z10rand_floati, @function _Z10rand_floati: .LFB2057: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 movl %edi, %ebx call rand@PLT pxor %xmm0, %xmm0 cvtsi2ssl %eax, %xmm0 mulss .LC0(%rip), %xmm0 pxor %xmm1, %xmm1 cvtsi2ssl %ebx, %xmm1 mulss %xmm1, %xmm0 popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2057: .size _Z10rand_floati, .-_Z10rand_floati .globl _Z22allocate_init_2DmatrixPPPfii .type _Z22allocate_init_2DmatrixPPPfii, @function _Z22allocate_init_2DmatrixPPPfii: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $24, %rsp .cfi_def_cfa_offset 80 movq %rdi, %r13 movl %esi, %ebx movl %edx, %r15d movslq %esi, %rax salq $3, %rax movq %rax, 8(%rsp) movq %rax, %rdi call malloc@PLT movq %rax, 0(%r13) testl %ebx, %ebx jle .L5 movslq %r15d, %r14 salq $2, %r14 movl $0, %r12d .L9: movq %r12, %rbx addq 0(%r13), %rbx movq %r14, %rdi call malloc@PLT movq %rax, (%rbx) testl %r15d, %r15d jle .L7 movl $0, %ebx .L8: movq 0(%r13), %rax movq %rbx, %rbp addq (%rax,%r12), %rbp movl $100, %edi call _Z10rand_floati movss %xmm0, 0(%rbp) addq $4, %rbx cmpq %rbx, %r14 jne .L8 .L7: addq $8, %r12 cmpq %r12, 8(%rsp) jne .L9 .L5: addq $24, %rsp .cfi_def_cfa_offset 56 popq %rbx 
.cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size _Z22allocate_init_2DmatrixPPPfii, .-_Z22allocate_init_2DmatrixPPPfii .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC5: .string "Solver converged after %d iterations\n" .align 8 .LC6: .string "Solver not converged after %d iterations\n" .text .globl _Z6solverPPPfii .type _Z6solverPPPfii, @function _Z6solverPPPfii: .LFB2059: .cfi_startproc endbr64 pushq %r14 .cfi_def_cfa_offset 16 .cfi_offset 14, -16 pushq %r13 .cfi_def_cfa_offset 24 .cfi_offset 13, -24 pushq %r12 .cfi_def_cfa_offset 32 .cfi_offset 12, -32 pushq %rbp .cfi_def_cfa_offset 40 .cfi_offset 6, -40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset 3, -48 movl %esi, %r8d movl %edx, %ecx leal -3(%rsi), %eax leaq 16(,%rax,8), %r11 leal -3(%rdx), %eax leaq 8(,%rax,4), %r10 movl $0, %edx movsd .LC2(%rip), %xmm4 movss .LC3(%rip), %xmm3 movsd .LC4(%rip), %xmm5 movl %ecx, %r9d jmp .L14 .L15: movq (%rdi), %rbp movq 0(%rbp,%rbx), %r14 leaq (%r14,%rax), %rsi movss (%rsi), %xmm1 movaps %xmm1, %xmm0 addss -4(%r14,%rax), %xmm0 movq 0(%rbp,%r13), %rcx addss (%rcx,%rax), %xmm0 movq %rax, %rcx addq $4, %rax movq 0(%rbp,%r12), %rbp addss (%r14,%rax), %xmm0 addss 0(%rbp,%rcx), %xmm0 cvtss2sd %xmm0, %xmm0 mulsd %xmm4, %xmm0 cvtsd2ss %xmm0, %xmm0 movss %xmm0, (%rsi) movq (%rdi), %rsi movq (%rsi,%rbx), %rsi movss (%rsi,%rcx), %xmm0 subss %xmm1, %xmm0 andps %xmm3, %xmm0 addss %xmm0, %xmm2 cmpq %r10, %rax jne .L15 .L18: addq $8, %rbx cmpq %rbx, %r11 je .L16 .L21: leaq -8(%rbx), %r13 leaq 8(%rbx), %r12 movl $4, %eax cmpl $2, %r9d jg .L15 jmp .L18 .L16: pxor %xmm0, %xmm0 cvtsi2ssl %r8d, %xmm0 divss %xmm0, %xmm2 divss %xmm0, %xmm2 pxor %xmm0, %xmm0 cvtss2sd %xmm2, %xmm0 comisd %xmm0, %xmm5 ja .L19 addl $1, %edx cmpl $100, %edx je .L20 .L14: movl $8, %ebx pxor %xmm2, %xmm2 cmpl $2, %r8d jg .L21 jmp .L16 .L19: addl 
$1, %edx leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT .L13: popq %rbx .cfi_remember_state .cfi_def_cfa_offset 40 popq %rbp .cfi_def_cfa_offset 32 popq %r12 .cfi_def_cfa_offset 24 popq %r13 .cfi_def_cfa_offset 16 popq %r14 .cfi_def_cfa_offset 8 ret .L20: .cfi_restore_state movl $100, %edx leaq .LC6(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT jmp .L13 .cfi_endproc .LFE2059: .size _Z6solverPPPfii, .-_Z6solverPPPfii .section .rodata.str1.8 .align 8 .LC7: .string "Call this program with two parameters: matrix_size communication \n" .align 8 .LC8: .string "\t matrix_size: Add 2 to a power of 2 (e.g. : 18, 1026)\n" .section .rodata.str1.1,"aMS",@progbits,1 .LC9: .string "Matrix size = %d\n" .text .globl main .type main, @function main: .LFB2060: .cfi_startproc endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 pushq %rbx .cfi_def_cfa_offset 24 .cfi_offset 3, -24 subq $24, %rsp .cfi_def_cfa_offset 48 movq %fs:40, %rax movq %rax, 8(%rsp) xorl %eax, %eax cmpl $1, %edi jle .L35 movq 8(%rsi), %rdi movl $10, %edx movl $0, %esi call __isoc23_strtol@PLT movl %eax, %ebx movl %eax, %edx leaq .LC9(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq %rsp, %rbp movl %ebx, %edx movl %ebx, %esi movq %rbp, %rdi call _Z22allocate_init_2DmatrixPPPfii movl %ebx, %edx movl %ebx, %esi movq %rbp, %rdi call _Z6solverPPPfii movq 8(%rsp), %rax subq %fs:40, %rax jne .L36 movl $0, %eax addq $24, %rsp .cfi_remember_state .cfi_def_cfa_offset 24 popq %rbx .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 ret .L35: .cfi_restore_state leaq .LC7(%rip), %rsi movl $2, %edi call __printf_chk@PLT leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $1, %edi call exit@PLT .L36: call __stack_chk_fail@PLT .cfi_endproc .LFE2060: .size main, .-main .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2086: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq 
_ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2086: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .section .rodata.cst4,"aM",@progbits,4 .align 4 .LC0: .long 805306368 .section .rodata.cst8,"aM",@progbits,8 .align 8 .LC2: .long -1717986918 .long 1070176665 .section .rodata.cst16,"aM",@progbits,16 .align 16 .LC3: .long 2147483647 .long 0 .long 0 .long 0 .section .rodata.cst8 .align 8 .LC4: .long -1598689907 .long 1051772663 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "gs_seq.hip" .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function _Z10rand_floati .LCPI0_0: .long 0x30000000 # float 4.65661287E-10 .text .globl _Z10rand_floati .p2align 4, 0x90 .type _Z10rand_floati,@function _Z10rand_floati: # @_Z10rand_floati .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset %rbx, -16 movl %edi, %ebx callq rand cvtsi2ss %eax, %xmm1 cvtsi2ss %ebx, %xmm0 mulss .LCPI0_0(%rip), %xmm1 mulss %xmm1, %xmm0 popq %rbx .cfi_def_cfa_offset 8 retq .Lfunc_end0: .size _Z10rand_floati, .Lfunc_end0-_Z10rand_floati .cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function _Z22allocate_init_2DmatrixPPPfii .LCPI1_0: .long 0x30000000 # float 4.65661287E-10 .LCPI1_1: .long 0x42c80000 # float 100 .text .globl _Z22allocate_init_2DmatrixPPPfii .p2align 4, 0x90 .type _Z22allocate_init_2DmatrixPPPfii,@function _Z22allocate_init_2DmatrixPPPfii: # @_Z22allocate_init_2DmatrixPPPfii .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 pushq %rax .cfi_def_cfa_offset 64 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movl %edx, 4(%rsp) # 4-byte Spill movl %esi, %ebp movq %rdi, %r14 movslq %esi, %rbx leaq (,%rbx,8), %rdi callq malloc movq %rax, (%r14) testl %ebx, %ebx jle .LBB1_6 # %bb.1: # %.lr.ph17 movslq 4(%rsp), %rax # 4-byte Folded Reload leaq (,%rax,4), %r15 movl %ebp, %r12d movl %eax, %r13d xorl %ebp, %ebp jmp .LBB1_2 .p2align 4, 0x90 .LBB1_5: # %._crit_edge # in Loop: Header=BB1_2 Depth=1 incq %rbp cmpq %r12, %rbp je .LBB1_6 .LBB1_2: # =>This Loop Header: Depth=1 # Child Loop BB1_4 Depth 2 movq %r15, %rdi callq malloc movq (%r14), %rcx movq %rax, (%rcx,%rbp,8) cmpl $0, 4(%rsp) # 4-byte Folded Reload jle .LBB1_5 # 
%bb.3: # %.lr.ph.preheader # in Loop: Header=BB1_2 Depth=1 xorl %ebx, %ebx .p2align 4, 0x90 .LBB1_4: # %.lr.ph # Parent Loop BB1_2 Depth=1 # => This Inner Loop Header: Depth=2 callq rand movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 mulss %xmm1, %xmm0 movss .LCPI1_1(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero mulss %xmm1, %xmm0 movq (%r14), %rax movq (%rax,%rbp,8), %rax movss %xmm0, (%rax,%rbx,4) incq %rbx cmpq %rbx, %r13 jne .LBB1_4 jmp .LBB1_5 .LBB1_6: # %._crit_edge18 addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size _Z22allocate_init_2DmatrixPPPfii, .Lfunc_end1-_Z22allocate_init_2DmatrixPPPfii .cfi_endproc # -- End function .section .rodata.cst8,"aM",@progbits,8 .p2align 3, 0x0 # -- Begin function _Z6solverPPPfii .LCPI2_0: .quad 0x3fc999999999999a # double 0.20000000000000001 .LCPI2_2: .quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7 .section .rodata.cst16,"aM",@progbits,16 .p2align 4, 0x0 .LCPI2_1: .long 0x7fffffff # float NaN .long 0x7fffffff # float NaN .long 0x7fffffff # float NaN .long 0x7fffffff # float NaN .text .globl _Z6solverPPPfii .p2align 4, 0x90 .type _Z6solverPPPfii,@function _Z6solverPPPfii: # @_Z6solverPPPfii .cfi_startproc # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %r13 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r13, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 # kill: def $edx killed $edx def $rdx # kill: def $esi killed $esi def $rsi cvtsi2ss %esi, %xmm0 leal -1(%rsi), %ecx leal -1(%rdx), %r8d xorl %r9d, %r9d movsd .LCPI2_0(%rip), %xmm1 # xmm1 = mem[0],zero movaps .LCPI2_1(%rip), %xmm2 # xmm2 = [NaN,NaN,NaN,NaN] movsd .LCPI2_2(%rip), 
%xmm3 # xmm3 = mem[0],zero movl $1, %r10d xorl %r11d, %r11d .p2align 4, 0x90 .LBB2_1: # %.preheader45 # =>This Loop Header: Depth=1 # Child Loop BB2_3 Depth 2 # Child Loop BB2_5 Depth 3 xorps %xmm4, %xmm4 cmpl $3, %esi jl .LBB2_7 # %bb.2: # %.preheader.preheader # in Loop: Header=BB2_1 Depth=1 movl $1, %eax jmp .LBB2_3 .p2align 4, 0x90 .LBB2_6: # %._crit_edge # in Loop: Header=BB2_3 Depth=2 incq %rax cmpq %rcx, %rax je .LBB2_7 .LBB2_3: # %.preheader # Parent Loop BB2_1 Depth=1 # => This Loop Header: Depth=2 # Child Loop BB2_5 Depth 3 cmpl $3, %edx jl .LBB2_6 # %bb.4: # %.lr.ph # in Loop: Header=BB2_3 Depth=2 movq (%rdi), %r15 movq -8(%r15,%rax,8), %rbx movq (%r15,%rax,8), %r14 movq 8(%r15,%rax,8), %r15 movss (%r14), %xmm5 # xmm5 = mem[0],zero,zero,zero movl $1, %r12d .p2align 4, 0x90 .LBB2_5: # Parent Loop BB2_1 Depth=1 # Parent Loop BB2_3 Depth=2 # => This Inner Loop Header: Depth=3 movss (%r14,%r12,4), %xmm6 # xmm6 = mem[0],zero,zero,zero addss %xmm6, %xmm5 addss (%rbx,%r12,4), %xmm5 addss 4(%r14,%r12,4), %xmm5 addss (%r15,%r12,4), %xmm5 cvtss2sd %xmm5, %xmm5 mulsd %xmm1, %xmm5 cvtsd2ss %xmm5, %xmm5 movss %xmm5, (%r14,%r12,4) leaq 1(%r12), %r13 movaps %xmm5, %xmm7 subss %xmm6, %xmm7 andps %xmm2, %xmm7 addss %xmm7, %xmm4 movq %r13, %r12 cmpq %r13, %r8 jne .LBB2_5 jmp .LBB2_6 .p2align 4, 0x90 .LBB2_7: # %._crit_edge50 # in Loop: Header=BB2_1 Depth=1 divss %xmm0, %xmm4 divss %xmm0, %xmm4 cvtss2sd %xmm4, %xmm4 ucomisd %xmm4, %xmm3 cmoval %r10d, %r11d leal 1(%r9), %eax testl %r11d, %r11d jne .LBB2_9 # %bb.8: # %._crit_edge50 # in Loop: Header=BB2_1 Depth=1 cmpl $99, %r9d movl %eax, %r9d jb .LBB2_1 .LBB2_9: testl %r11d, %r11d movl $.L.str.1, %ecx movl $.L.str, %edi cmoveq %rcx, %rdi movl %eax, %esi xorl %eax, %eax popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 jmp printf # TAILCALL .Lfunc_end2: .size _Z6solverPPPfii, .Lfunc_end2-_Z6solverPPPfii 
.cfi_endproc # -- End function .section .rodata.cst4,"aM",@progbits,4 .p2align 2, 0x0 # -- Begin function main .LCPI3_0: .long 0x30000000 # float 4.65661287E-10 .LCPI3_1: .long 0x42c80000 # float 100 .text .globl main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 pushq %rax .cfi_def_cfa_offset 64 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 cmpl $1, %edi jle .LBB3_7 # %bb.1: movq 8(%rsi), %rdi xorl %esi, %esi movl $10, %edx callq __isoc23_strtol movq %rax, %rbx movl $.L.str.4, %edi movl %ebx, %esi xorl %eax, %eax callq printf movq %rbx, %r14 shlq $32, %r14 movq %r14, %rdi sarq $29, %rdi callq malloc movq %rax, (%rsp) testl %ebx, %ebx jle .LBB3_6 # %bb.2: # %.lr.ph17.i movq %rax, %r15 sarq $30, %r14 movl %ebx, %r12d xorl %r13d, %r13d .p2align 4, 0x90 .LBB3_3: # =>This Loop Header: Depth=1 # Child Loop BB3_4 Depth 2 movq %r14, %rdi callq malloc movq %rax, (%r15,%r13,8) xorl %ebp, %ebp .p2align 4, 0x90 .LBB3_4: # %.lr.ph.i # Parent Loop BB3_3 Depth=1 # => This Inner Loop Header: Depth=2 callq rand xorps %xmm0, %xmm0 cvtsi2ss %eax, %xmm0 mulss .LCPI3_0(%rip), %xmm0 mulss .LCPI3_1(%rip), %xmm0 movq (%r15,%r13,8), %rax movss %xmm0, (%rax,%rbp,4) incq %rbp cmpq %rbp, %r12 jne .LBB3_4 # %bb.5: # %._crit_edge.i # in Loop: Header=BB3_3 Depth=1 incq %r13 cmpq %r12, %r13 jne .LBB3_3 .LBB3_6: # %_Z22allocate_init_2DmatrixPPPfii.exit movq %rsp, %rdi movl %ebx, %esi movl %ebx, %edx callq _Z6solverPPPfii xorl %eax, %eax addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB3_7: 
.cfi_def_cfa_offset 64 movl $.Lstr, %edi callq puts@PLT movl $.Lstr.1, %edi callq puts@PLT movl $1, %edi callq exit .Lfunc_end3: .size main, .Lfunc_end3-main .cfi_endproc # -- End function .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "Solver converged after %d iterations\n" .size .L.str, 38 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "Solver not converged after %d iterations\n" .size .L.str.1, 42 .type .L.str.4,@object # @.str.4 .L.str.4: .asciz "Matrix size = %d\n" .size .L.str.4, 18 .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Call this program with two parameters: matrix_size communication " .size .Lstr, 66 .type .Lstr.1,@object # @str.1 .Lstr.1: .asciz "\t matrix_size: Add 2 to a power of 2 (e.g. : 18, 1026)" .size .Lstr.1, 55 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "ward_implement.h" #include "brdf_common.h" __global__ void ward_kernel(float3* pos, unsigned int width, float3 V, float3 N, float3 X, float3 Y, float alpha_x, float alpha_y, bool anisotropic) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; float3 L = calculateL(pos, width, x, y); float3 H = normalize(add(L, V)); float ax = alpha_x; float ay = anisotropic ? alpha_y : alpha_x; float exponent = -2.f * (sqr(dot(H,X) / ax) + sqr(dot(H,Y) / ay)) / sqrt(dot(H, N)); float spec = 1.f / (4.f * 3.1415926f * ax * ay * sqrt(dot(L,N) * dot(V, N))); spec *= exp(exponent); pos[y*width+x] = scale(L, spec); } extern "C" void ward_brdf(float3 *pos, unsigned numVertices, unsigned width, float3 V, float3 N, float3 X, float3 Y, float alpha_x, float alpha_y, bool anisotropic) { dim3 block(8, 8, 1); unsigned height = numVertices / width; dim3 grid(width / block.x, height / block.y, 1); ward_kernel<<< grid, block>>>(pos, width, V, N, X, Y, alpha_x, alpha_y, anisotropic); }
.file "tmpxft_000c8787_00000000-6_ward.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2041: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2041: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z49__device_stub__Z11ward_kernelP6float3jS_S_S_S_ffbP6float3jRS_S1_S1_S1_ffb .type _Z49__device_stub__Z11ward_kernelP6float3jS_S_S_S_ffbP6float3jRS_S1_S1_S1_ffb, @function _Z49__device_stub__Z11ward_kernelP6float3jS_S_S_S_ffbP6float3jRS_S1_S1_S1_ffb: .LFB2063: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movss %xmm0, 16(%rsp) movss %xmm1, 12(%rsp) movl 192(%rsp), %eax movb %al, 8(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) movq %rdx, 112(%rsp) movq %rcx, 120(%rsp) movq %r8, 128(%rsp) movq %r9, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) leaq 8(%rsp), %rax movq %rax, 160(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 168(%rsp), %rax subq %fs:40, %rax jne .L8 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 200 pushq 40(%rsp) .cfi_def_cfa_offset 208 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z11ward_kernelP6float3jS_S_S_S_ffb(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2063: .size 
_Z49__device_stub__Z11ward_kernelP6float3jS_S_S_S_ffbP6float3jRS_S1_S1_S1_ffb, .-_Z49__device_stub__Z11ward_kernelP6float3jS_S_S_S_ffbP6float3jRS_S1_S1_S1_ffb .globl _Z11ward_kernelP6float3jS_S_S_S_ffb .type _Z11ward_kernelP6float3jS_S_S_S_ffb, @function _Z11ward_kernelP6float3jS_S_S_S_ffb: .LFB2064: .cfi_startproc endbr64 subq $72, %rsp .cfi_def_cfa_offset 80 movq %xmm0, 48(%rsp) movss %xmm1, 56(%rsp) movq %xmm2, 32(%rsp) movss %xmm3, 40(%rsp) movq %xmm4, 16(%rsp) movss %xmm5, 24(%rsp) movq %xmm6, (%rsp) movss %xmm7, 8(%rsp) leaq 32(%rsp), %rcx leaq 48(%rsp), %rax subq $8, %rsp .cfi_def_cfa_offset 88 movzbl %dl, %edx pushq %rdx .cfi_def_cfa_offset 96 movss 104(%rsp), %xmm1 movss 96(%rsp), %xmm0 leaq 16(%rsp), %r9 leaq 32(%rsp), %r8 movq %rax, %rdx call _Z49__device_stub__Z11ward_kernelP6float3jS_S_S_S_ffbP6float3jRS_S1_S1_S1_ffb addq $88, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2064: .size _Z11ward_kernelP6float3jS_S_S_S_ffb, .-_Z11ward_kernelP6float3jS_S_S_S_ffb .globl ward_brdf .type ward_brdf, @function ward_brdf: .LFB2038: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $144, %rsp .cfi_def_cfa_offset 176 movq %rdi, %r12 movl %esi, %eax movl %edx, %ebx movl %ecx, %ebp movq %xmm0, 48(%rsp) movss %xmm1, 56(%rsp) movq %xmm2, 32(%rsp) movss %xmm3, 40(%rsp) movq %xmm4, 16(%rsp) movss %xmm5, 24(%rsp) movq %xmm6, (%rsp) movss %xmm7, 8(%rsp) movq %fs:40, %rdx movq %rdx, 136(%rsp) xorl %edx, %edx movl %ebx, %edx shrl $3, %edx movl %edx, 76(%rsp) movl $0, %edx divl %ebx shrl $3, %eax movl %eax, 80(%rsp) movl $8, 64(%rsp) movl $8, 68(%rsp) movl $0, %r9d movl $0, %r8d movq 64(%rsp), %rdx movl $1, %ecx movq 76(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 136(%rsp), %rax subq %fs:40, %rax jne .L16 addq $144, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx 
.cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state movq 48(%rsp), %rax movq %rax, 88(%rsp) movl 56(%rsp), %eax movl %eax, 96(%rsp) movq 32(%rsp), %rax movq %rax, 100(%rsp) movl 40(%rsp), %eax movl %eax, 108(%rsp) movq 16(%rsp), %rax movq %rax, 112(%rsp) movl 24(%rsp), %eax movl %eax, 120(%rsp) movq (%rsp), %rax movq %rax, 124(%rsp) movl 8(%rsp), %eax movl %eax, 132(%rsp) leaq 100(%rsp), %rcx leaq 88(%rsp), %rdx subq $8, %rsp .cfi_def_cfa_offset 184 movzbl %bpl, %ebp pushq %rbp .cfi_def_cfa_offset 192 movss 200(%rsp), %xmm1 movss 192(%rsp), %xmm0 leaq 140(%rsp), %r9 leaq 128(%rsp), %r8 movl %ebx, %esi movq %r12, %rdi call _Z49__device_stub__Z11ward_kernelP6float3jS_S_S_S_ffbP6float3jRS_S1_S1_S1_ffb addq $16, %rsp .cfi_def_cfa_offset 176 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2038: .size ward_brdf, .-ward_brdf .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z11ward_kernelP6float3jS_S_S_S_ffb" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2066: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z11ward_kernelP6float3jS_S_S_S_ffb(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2066: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" 
.align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "ward_implement.h" #include "brdf_common.h" __global__ void ward_kernel(float3* pos, unsigned int width, float3 V, float3 N, float3 X, float3 Y, float alpha_x, float alpha_y, bool anisotropic) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; float3 L = calculateL(pos, width, x, y); float3 H = normalize(add(L, V)); float ax = alpha_x; float ay = anisotropic ? alpha_y : alpha_x; float exponent = -2.f * (sqr(dot(H,X) / ax) + sqr(dot(H,Y) / ay)) / sqrt(dot(H, N)); float spec = 1.f / (4.f * 3.1415926f * ax * ay * sqrt(dot(L,N) * dot(V, N))); spec *= exp(exponent); pos[y*width+x] = scale(L, spec); } extern "C" void ward_brdf(float3 *pos, unsigned numVertices, unsigned width, float3 V, float3 N, float3 X, float3 Y, float alpha_x, float alpha_y, bool anisotropic) { dim3 block(8, 8, 1); unsigned height = numVertices / width; dim3 grid(width / block.x, height / block.y, 1); ward_kernel<<< grid, block>>>(pos, width, V, N, X, Y, alpha_x, alpha_y, anisotropic); }
#include <hip/hip_runtime.h> #include "ward_implement.h" #include "brdf_common.h" __global__ void ward_kernel(float3* pos, unsigned int width, float3 V, float3 N, float3 X, float3 Y, float alpha_x, float alpha_y, bool anisotropic) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; float3 L = calculateL(pos, width, x, y); float3 H = normalize(add(L, V)); float ax = alpha_x; float ay = anisotropic ? alpha_y : alpha_x; float exponent = -2.f * (sqr(dot(H,X) / ax) + sqr(dot(H,Y) / ay)) / sqrt(dot(H, N)); float spec = 1.f / (4.f * 3.1415926f * ax * ay * sqrt(dot(L,N) * dot(V, N))); spec *= exp(exponent); pos[y*width+x] = scale(L, spec); } extern "C" void ward_brdf(float3 *pos, unsigned numVertices, unsigned width, float3 V, float3 N, float3 X, float3 Y, float alpha_x, float alpha_y, bool anisotropic) { dim3 block(8, 8, 1); unsigned height = numVertices / width; dim3 grid(width / block.x, height / block.y, 1); ward_kernel<<< grid, block>>>(pos, width, V, N, X, Y, alpha_x, alpha_y, anisotropic); }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "ward_implement.h" #include "brdf_common.h" __global__ void ward_kernel(float3* pos, unsigned int width, float3 V, float3 N, float3 X, float3 Y, float alpha_x, float alpha_y, bool anisotropic) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; float3 L = calculateL(pos, width, x, y); float3 H = normalize(add(L, V)); float ax = alpha_x; float ay = anisotropic ? alpha_y : alpha_x; float exponent = -2.f * (sqr(dot(H,X) / ax) + sqr(dot(H,Y) / ay)) / sqrt(dot(H, N)); float spec = 1.f / (4.f * 3.1415926f * ax * ay * sqrt(dot(L,N) * dot(V, N))); spec *= exp(exponent); pos[y*width+x] = scale(L, spec); } extern "C" void ward_brdf(float3 *pos, unsigned numVertices, unsigned width, float3 V, float3 N, float3 X, float3 Y, float alpha_x, float alpha_y, bool anisotropic) { dim3 block(8, 8, 1); unsigned height = numVertices / width; dim3 grid(width / block.x, height / block.y, 1); ward_kernel<<< grid, block>>>(pos, width, V, N, X, Y, alpha_x, alpha_y, anisotropic); }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .globl _Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .p2align 8 .type _Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb,@function _Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb: s_clause 0x1 s_load_b32 s2, s[0:1], 0x54 s_load_b128 s[16:19], s[0:1], 0x0 v_bfe_u32 v1, v0, 10, 10 v_and_b32_e32 v0, 0x3ff, v0 s_mov_b32 s5, 0x3ee4f8b5 s_mov_b32 s4, 0x88e368f1 s_load_b64 s[12:13], s[0:1], 0x10 s_waitcnt lgkmcnt(0) s_lshr_b32 s3, s2, 16 s_and_b32 s2, s2, 0xffff v_mad_u64_u32 v[2:3], null, s15, s3, v[1:2] s_mul_i32 s14, s14, s2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_lo_u32 v1, v2, s18 v_add3_u32 v0, s14, v0, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1) v_mad_u64_u32 v[3:4], null, v0, 12, s[16:17] global_load_b96 v[0:2], v[3:4], off s_waitcnt vmcnt(0) v_mul_f32_e32 v5, v1, v1 v_fmac_f32_e32 v5, v0, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmac_f32_e32 v5, v2, v2 v_mul_f32_e32 v6, 0x4f800000, v5 v_cmp_gt_f32_e32 vcc_lo, 0xf800000, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v5, v5, v6, vcc_lo v_sqrt_f32_e32 v6, v5 s_waitcnt_depctr 0xfff v_add_nc_u32_e32 v7, -1, v6 v_add_nc_u32_e32 v8, 1, v6 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f32 v9, -v7, v6, v5 v_fma_f32 v10, -v8, v6, v5 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_ge_f32_e64 s2, 0, v9 v_cndmask_b32_e64 v6, v6, v7, s2 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_lt_f32_e64 s2, 0, v10 v_cndmask_b32_e64 v6, v6, v8, s2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_f32_e32 v7, 0x37800000, v6 v_cndmask_b32_e32 v6, v6, v7, vcc_lo v_cmp_class_f32_e64 vcc_lo, v5, 0x260 s_delay_alu instid0(VALU_DEP_2) | 
instskip(NEXT) | instid1(VALU_DEP_1) v_dual_mov_b32 v7, 0x3f13cd36 :: v_dual_cndmask_b32 v8, v6, v5 v_cvt_f64_f32_e32 v[5:6], v8 s_delay_alu instid0(VALU_DEP_1) v_cmp_lt_f64_e32 vcc_lo, s[4:5], v[5:6] v_dual_mov_b32 v5, 0x3f13cd36 :: v_dual_mov_b32 v6, 0x3f13cd36 s_and_saveexec_b32 s6, vcc_lo s_cbranch_execz .LBB0_2 v_div_scale_f32 v5, null, v8, v8, v0 v_div_scale_f32 v6, null, v8, v8, v1 v_div_scale_f32 v7, null, v8, v8, v2 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_rcp_f32_e32 v9, v5 v_rcp_f32_e32 v10, v6 v_div_scale_f32 v12, vcc_lo, v0, v8, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(TRANS32_DEP_3) v_rcp_f32_e32 v11, v7 v_div_scale_f32 v13, s2, v1, v8, v1 v_div_scale_f32 v17, s3, v2, v8, v2 v_fma_f32 v14, -v5, v9, 1.0 s_waitcnt_depctr 0xfff v_fma_f32 v15, -v6, v10, 1.0 v_fma_f32 v16, -v7, v11, 1.0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_fmac_f32 v9, v14, v9 :: v_dual_fmac_f32 v10, v15, v10 v_dual_mul_f32 v14, v12, v9 :: v_dual_mul_f32 v15, v13, v10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4) v_fma_f32 v18, -v5, v14, v12 v_fmac_f32_e32 v11, v16, v11 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_fma_f32 v19, -v6, v15, v13 v_fmac_f32_e32 v14, v18, v9 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_dual_mul_f32 v16, v17, v11 :: v_dual_fmac_f32 v15, v19, v10 v_fma_f32 v5, -v5, v14, v12 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_fma_f32 v20, -v7, v16, v17 v_fma_f32 v6, -v6, v15, v13 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_div_fmas_f32 v5, v5, v9, v14 v_fmac_f32_e32 v16, v20, v11 s_mov_b32 vcc_lo, s2 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_div_fmas_f32 v6, v6, v10, v15 s_mov_b32 vcc_lo, s3 v_fma_f32 v7, -v7, v16, v17 v_div_fixup_f32 v5, v5, v8, v0 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | 
instid1(VALU_DEP_3) v_div_fixup_f32 v6, v6, v8, v1 v_div_fmas_f32 v7, v7, v11, v16 s_delay_alu instid0(VALU_DEP_1) v_div_fixup_f32 v7, v7, v8, v2 .LBB0_2: s_or_b32 exec_lo, exec_lo, s6 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_dual_add_f32 v0, s12, v6 :: v_dual_add_f32 v1, s19, v5 v_add_f32_e32 v2, s13, v7 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_f32_e32 v8, v0, v0 v_fmac_f32_e32 v8, v1, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmac_f32_e32 v8, v2, v2 v_mul_f32_e32 v9, 0x4f800000, v8 v_cmp_gt_f32_e32 vcc_lo, 0xf800000, v8 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v8, v8, v9, vcc_lo v_sqrt_f32_e32 v9, v8 s_waitcnt_depctr 0xfff v_add_nc_u32_e32 v10, -1, v9 v_add_nc_u32_e32 v11, 1, v9 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f32 v12, -v10, v9, v8 v_fma_f32 v13, -v11, v9, v8 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_ge_f32_e64 s2, 0, v12 v_cndmask_b32_e64 v9, v9, v10, s2 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_cmp_lt_f32_e64 s2, 0, v13 v_cndmask_b32_e64 v9, v9, v11, s2 v_mov_b32_e32 v11, 0x3f13cd36 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_mul_f32_e32 v10, 0x37800000, v9 v_cndmask_b32_e32 v9, v9, v10, vcc_lo v_cmp_class_f32_e64 vcc_lo, v8, 0x260 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_cndmask_b32_e32 v8, v9, v8, vcc_lo v_cvt_f64_f32_e32 v[9:10], v8 s_delay_alu instid0(VALU_DEP_1) v_cmp_lt_f64_e32 vcc_lo, s[4:5], v[9:10] s_clause 0x1 s_load_b256 s[4:11], s[0:1], 0x18 s_load_b32 s16, s[0:1], 0x38 v_dual_mov_b32 v10, 0x3f13cd36 :: v_dual_mov_b32 v9, 0x3f13cd36 s_and_saveexec_b32 s14, vcc_lo s_cbranch_execz .LBB0_4 v_div_scale_f32 v9, null, v8, v8, v1 v_div_scale_f32 v10, null, v8, v8, v0 v_div_scale_f32 v11, null, v8, v8, v2 s_delay_alu instid0(VALU_DEP_3) | 
instskip(NEXT) | instid1(VALU_DEP_2) v_rcp_f32_e32 v12, v9 v_rcp_f32_e32 v13, v10 v_div_scale_f32 v15, vcc_lo, v1, v8, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(TRANS32_DEP_3) v_rcp_f32_e32 v14, v11 v_div_scale_f32 v16, s2, v0, v8, v0 v_div_scale_f32 v20, s3, v2, v8, v2 v_fma_f32 v17, -v9, v12, 1.0 s_waitcnt_depctr 0xfff v_fma_f32 v18, -v10, v13, 1.0 v_fma_f32 v19, -v11, v14, 1.0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_dual_fmac_f32 v12, v17, v12 :: v_dual_fmac_f32 v13, v18, v13 v_dual_mul_f32 v17, v15, v12 :: v_dual_mul_f32 v18, v16, v13 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4) v_fma_f32 v21, -v9, v17, v15 v_fmac_f32_e32 v14, v19, v14 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_fma_f32 v22, -v10, v18, v16 v_fmac_f32_e32 v17, v21, v12 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_dual_mul_f32 v19, v20, v14 :: v_dual_fmac_f32 v18, v22, v13 v_fma_f32 v9, -v9, v17, v15 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_fma_f32 v23, -v11, v19, v20 v_fma_f32 v10, -v10, v18, v16 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_div_fmas_f32 v9, v9, v12, v17 v_fmac_f32_e32 v19, v23, v14 s_mov_b32 vcc_lo, s2 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_div_fmas_f32 v12, v10, v13, v18 s_mov_b32 vcc_lo, s3 v_fma_f32 v11, -v11, v19, v20 v_div_fixup_f32 v10, v9, v8, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_div_fmas_f32 v13, v11, v14, v19 v_div_fixup_f32 v11, v12, v8, v0 v_div_fixup_f32 v9, v13, v8, v2 .LBB0_4: s_or_b32 exec_lo, exec_lo, s14 s_clause 0x1 s_load_b64 s[14:15], s[0:1], 0x3c s_load_b32 s0, s[0:1], 0x44 s_waitcnt lgkmcnt(0) v_mul_f32_e32 v1, s11, v11 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_dual_mul_f32 v0, s8, v11 :: v_dual_fmac_f32 v1, s10, v10 v_mul_f32_e32 v2, s5, v11 
v_dual_fmac_f32 v0, s7, v10 :: v_dual_fmac_f32 v1, s16, v9 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_fmac_f32_e32 v2, s4, v10 v_fmac_f32_e32 v2, s6, v9 v_mov_b32_e32 v8, s15 s_bitcmp1_b32 s0, 0 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_mul_f32_e32 v13, 0x4f800000, v2 v_cmp_gt_f32_e64 s0, 0xf800000, v2 s_cselect_b32 vcc_lo, -1, 0 v_cndmask_b32_e64 v2, v2, v13, s0 s_delay_alu instid0(VALU_DEP_1) v_sqrt_f32_e32 v17, v2 v_cndmask_b32_e32 v8, s14, v8, vcc_lo s_waitcnt_depctr 0xfff v_dual_mul_f32 v14, s5, v6 :: v_dual_add_nc_u32 v23, 1, v17 v_add_nc_u32_e32 v21, -1, v17 v_div_scale_f32 v11, null, v8, v8, v1 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_fmac_f32_e32 v14, s4, v5 v_rcp_f32_e32 v12, v11 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1) v_fmac_f32_e32 v14, s6, v7 s_waitcnt_depctr 0xfff v_fma_f32 v16, -v11, v12, 1.0 v_fmac_f32_e32 v12, v16, v12 v_fmac_f32_e32 v0, s9, v9 v_mul_f32_e64 v16, s12, s5 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_div_scale_f32 v10, null, s14, s14, v0 v_fmac_f32_e64 v16, s19, s4 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_rcp_f32_e32 v9, v10 v_fmac_f32_e64 v16, s13, s6 s_waitcnt_depctr 0xfff v_fma_f32 v13, -v10, v9, 1.0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_fmac_f32_e32 v9, v13, v9 v_div_scale_f32 v13, s1, v1, v8, v1 v_mul_f32_e32 v19, v13, v12 v_div_scale_f32 v15, vcc_lo, v0, s14, v0 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_fma_f32 v22, -v11, v19, v13 v_dual_mul_f32 v18, v15, v9 :: v_dual_fmac_f32 v19, v22, v12 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3) v_fma_f32 v20, -v10, v18, v15 v_fma_f32 v22, -v23, v17, v2 v_fma_f32 v11, -v11, v19, v13 s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_fmac_f32_e32 v18, v20, v9 v_fma_f32 v20, -v21, v17, v2 
v_fma_f32 v10, -v10, v18, v15 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_ge_f32_e64 s2, 0, v20 v_div_fmas_f32 v9, v10, v9, v18 s_mov_b32 vcc_lo, s1 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_4) v_cndmask_b32_e64 v15, v17, v21, s2 v_div_fmas_f32 v10, v11, v12, v19 v_mul_f32_e32 v11, v16, v14 v_cmp_lt_f32_e32 vcc_lo, 0, v22 v_div_fixup_f32 v0, v9, s14, v0 v_div_fixup_f32 v1, v10, v8, v1 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_dual_mul_f32 v1, v1, v1 :: v_dual_cndmask_b32 v12, v15, v23 v_cmp_gt_f32_e32 vcc_lo, 0xf800000, v11 v_dual_fmac_f32 v1, v0, v0 :: v_dual_mul_f32 v10, 0x4f800000, v11 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_f32_e32 v13, 0x37800000, v12 v_mul_f32_e32 v1, -2.0, v1 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_cndmask_b32_e32 v9, v11, v10, vcc_lo v_cndmask_b32_e64 v10, v12, v13, s0 v_cmp_class_f32_e64 s0, v2, 0x260 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) v_sqrt_f32_e32 v0, v9 v_cndmask_b32_e64 v2, v10, v2, s0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_2) v_div_scale_f32 v10, null, v2, v2, v1 s_waitcnt_depctr 0xfff v_add_nc_u32_e32 v11, -1, v0 v_rcp_f32_e32 v13, v10 v_add_nc_u32_e32 v12, 1, v0 v_fma_f32 v14, -v11, v0, v9 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fma_f32 v15, -v12, v0, v9 v_cmp_ge_f32_e64 s0, 0, v14 v_mul_f32_e64 v14, 0x41490fda, s14 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(TRANS32_DEP_1) v_cndmask_b32_e64 v0, v0, v11, s0 v_fma_f32 v11, -v10, v13, 1.0 v_cmp_lt_f32_e64 s0, 0, v15 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_dual_mul_f32 v8, v14, v8 :: v_dual_fmac_f32 v13, v11, v13 v_cndmask_b32_e64 v0, v0, v12, s0 v_div_scale_f32 v12, s0, v1, v2, v1 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_mul_f32_e32 v11, 
0x37800000, v0 v_mul_f32_e32 v15, v12, v13 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v0, v0, v11, vcc_lo v_fma_f32 v11, -v10, v15, v12 v_cmp_class_f32_e64 vcc_lo, v9, 0x260 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_fmac_f32_e32 v15, v11, v13 v_cndmask_b32_e32 v0, v0, v9, vcc_lo s_mov_b32 vcc_lo, s0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_mul_f32_e32 v0, v8, v0 v_fma_f32 v8, -v10, v15, v12 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_div_scale_f32 v9, null, v0, v0, 1.0 v_div_fmas_f32 v8, v8, v13, v15 v_div_scale_f32 v13, vcc_lo, 1.0, v0, 1.0 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2) v_rcp_f32_e32 v10, v9 v_div_fixup_f32 v1, v8, v2, v1 s_waitcnt_depctr 0xfff v_fma_f32 v8, -v9, v10, 1.0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) v_fmac_f32_e32 v10, v8, v10 v_mul_f32_e32 v2, 0x3fb8aa3b, v1 v_fma_f32 v11, v1, 0x3fb8aa3b, -v2 v_rndne_f32_e32 v12, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_fmamk_f32 v8, v1, 0x32a5705f, v11 v_dual_sub_f32 v2, v2, v12 :: v_dual_mul_f32 v11, v13, v10 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_f32_e32 v2, v2, v8 v_fma_f32 v8, -v9, v11, v13 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1) v_exp_f32_e32 v2, v2 v_fmac_f32_e32 v11, v8, v10 v_cvt_i32_f32_e32 v8, v12 s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2) v_fma_f32 v9, -v9, v11, v13 s_waitcnt_depctr 0xfff v_ldexp_f32 v2, v2, v8 v_div_fmas_f32 v8, v9, v10, v11 v_cmp_ngt_f32_e32 vcc_lo, 0xc2ce8ed0, v1 v_div_fixup_f32 v0, v8, v0, 1.0 s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_cndmask_b32_e32 v2, 0, v2, vcc_lo v_cmp_nlt_f32_e32 vcc_lo, 0x42b17218, v1 v_cndmask_b32_e32 v1, 0x7f800000, v2, vcc_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | 
instid1(VALU_DEP_1) v_mul_f32_e32 v2, v0, v1 v_mul_f32_e32 v1, v6, v2 v_mul_f32_e32 v0, v5, v2 v_mul_f32_e32 v2, v7, v2 global_store_b96 v[3:4], v[0:2], off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 328 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 24 .amdhsa_next_free_sgpr 20 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb, .Lfunc_end0-_Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 
e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 12 .size: 12 .value_kind: by_value - .offset: 24 .size: 12 .value_kind: by_value - .offset: 36 .size: 12 .value_kind: by_value - .offset: 48 .size: 12 .value_kind: by_value - .offset: 60 .size: 4 .value_kind: by_value - .offset: 64 .size: 4 .value_kind: by_value - .offset: 68 .size: 1 .value_kind: by_value - .offset: 72 .size: 4 .value_kind: hidden_block_count_x - .offset: 76 .size: 4 .value_kind: hidden_block_count_y - .offset: 80 .size: 4 .value_kind: hidden_block_count_z - .offset: 84 .size: 2 .value_kind: hidden_group_size_x - .offset: 86 .size: 2 .value_kind: hidden_group_size_y - .offset: 88 .size: 2 .value_kind: hidden_group_size_z - .offset: 90 .size: 2 .value_kind: hidden_remainder_x - .offset: 92 .size: 2 .value_kind: hidden_remainder_y - .offset: 94 .size: 2 .value_kind: hidden_remainder_z - .offset: 112 .size: 8 .value_kind: hidden_global_offset_x - .offset: 120 .size: 8 .value_kind: hidden_global_offset_y - .offset: 128 .size: 8 .value_kind: hidden_global_offset_z - .offset: 136 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 328 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .private_segment_fixed_size: 0 .sgpr_count: 22 .sgpr_spill_count: 0 .symbol: _Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 24 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h>
#include "ward_implement.h"
#include "brdf_common.h"

// Evaluates the (an)isotropic Ward specular BRDF for one vertex per thread and
// scales that vertex's light direction by the resulting specular value.
//
// Launch layout (set by ward_brdf below): 2-D grid of 8x8 blocks, one thread
// per element of a width x height array stored row-major in `pos`.
//
// Parameters:
//   pos         - in/out vertex array; element (x,y) is read via calculateL()
//                 and overwritten with scale(L, spec)
//   width       - row stride of `pos` (elements per row)
//   V, N, X, Y  - view vector, surface normal, and the two tangent-frame axes
//   alpha_x/_y  - Ward roughness parameters along X and Y
//   anisotropic - when false, alpha_x is used for both axes
//
// NOTE(review): calculateL/add/normalize/sqr/dot/scale come from
// ward_implement.h / brdf_common.h (not visible here) — presumed to be the
// usual float3 vector helpers; confirm against those headers.
// NOTE(review): there is no `if (x < width && y < height)` guard; this is safe
// only because ward_brdf sizes the grid with integer division (width/8,
// height/8), which never overshoots the array.
__global__ void ward_kernel(float3* pos, unsigned int width, float3 V, float3 N, float3 X, float3 Y, float alpha_x, float alpha_y, bool anisotropic)
{
    // 2-D global coordinates of this thread's element.
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;

    float3 L = calculateL(pos, width, x, y);   // light direction for this vertex
    float3 H = normalize(add(L, V));           // half vector between L and V

    // Isotropic mode collapses both roughness axes onto alpha_x.
    float ax = alpha_x;
    float ay = anisotropic ? alpha_y : alpha_x;

    // Ward exponent: -2 * (  (H.X/ax)^2 + (H.Y/ay)^2 ) / sqrt(H.N)
    float exponent = -2.f * (sqr(dot(H,X) / ax) + sqr(dot(H,Y) / ay)) / sqrt(dot(H, N));

    // Normalization term 1 / (4*pi*ax*ay*sqrt((L.N)(V.N))), then the exponential.
    float spec = 1.f / (4.f * 3.1415926f * ax * ay * sqrt(dot(L,N) * dot(V, N)));
    spec *= exp(exponent);

    pos[y*width+x] = scale(L, spec);
}

// Host-side launcher: derives height from the vertex count, carves the domain
// into 8x8 thread blocks, and launches ward_kernel on the default stream.
//
// NOTE(review): grid dims use integer division, so any remainder rows/columns
// (width or height not a multiple of 8) are silently left unprocessed; the
// launch result is not checked (no hipGetLastError after the <<<>>>).
extern "C"
void ward_brdf(float3 *pos, unsigned numVertices, unsigned width, float3 V, float3 N, float3 X, float3 Y,
               float alpha_x, float alpha_y, bool anisotropic)
{
    dim3 block(8, 8, 1);
    unsigned height = numVertices / width;          // rows in the vertex array
    dim3 grid(width / block.x, height / block.y, 1);

    ward_kernel<<< grid, block>>>(pos, width, V, N, X, Y, alpha_x, alpha_y, anisotropic);
}
.text .file "ward.hip" .globl _Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb # -- Begin function _Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .p2align 4, 0x90 .type _Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb,@function _Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb: # @_Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .cfi_startproc # %bb.0: subq $200, %rsp .cfi_def_cfa_offset 208 movlps %xmm0, 112(%rsp) movss %xmm1, 120(%rsp) movlps %xmm2, 96(%rsp) movss %xmm3, 104(%rsp) movlps %xmm4, 80(%rsp) movss %xmm5, 88(%rsp) movlps %xmm6, 64(%rsp) movss %xmm7, 72(%rsp) movq %rdi, 56(%rsp) movl %esi, 4(%rsp) movb %dl, 3(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 4(%rsp), %rax movq %rax, 136(%rsp) leaq 112(%rsp), %rax movq %rax, 144(%rsp) leaq 96(%rsp), %rax movq %rax, 152(%rsp) leaq 80(%rsp), %rax movq %rax, 160(%rsp) leaq 64(%rsp), %rax movq %rax, 168(%rsp) leaq 208(%rsp), %rax movq %rax, 176(%rsp) leaq 216(%rsp), %rax movq %rax, 184(%rsp) leaq 3(%rsp), %rax movq %rax, 192(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 128(%rsp), %r9 movl $_Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $216, %rsp .cfi_adjust_cfa_offset -216 retq .Lfunc_end0: .size _Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb, .Lfunc_end0-_Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .cfi_endproc # -- End function .globl ward_brdf # -- Begin function ward_brdf .p2align 4, 0x90 .type ward_brdf,@function ward_brdf: # @ward_brdf .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 
subq $304, %rsp # imm = 0x130 .cfi_def_cfa_offset 336 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %rbp, -16 movl %ecx, %ebx movss %xmm7, 24(%rsp) # 4-byte Spill movaps %xmm6, 288(%rsp) # 16-byte Spill movss %xmm5, 20(%rsp) # 4-byte Spill movaps %xmm4, 272(%rsp) # 16-byte Spill movss %xmm3, 16(%rsp) # 4-byte Spill movaps %xmm2, 256(%rsp) # 16-byte Spill movss %xmm1, 12(%rsp) # 4-byte Spill movaps %xmm0, 240(%rsp) # 16-byte Spill movl %edx, %ebp movl %esi, %eax movq %rdi, %r14 xorl %edx, %edx divl %ebp # kill: def $eax killed $eax def $rax movl %ebp, %edi shrl $3, %edi shrl $3, %eax shlq $32, %rax orq %rax, %rdi movabsq $34359738376, %rdx # imm = 0x800000008 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movss 344(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero movss 336(%rsp), %xmm1 # xmm1 = mem[0],zero,zero,zero movaps 240(%rsp), %xmm2 # 16-byte Reload movlps %xmm2, 144(%rsp) movss 12(%rsp), %xmm2 # 4-byte Reload # xmm2 = mem[0],zero,zero,zero movss %xmm2, 152(%rsp) movaps 256(%rsp), %xmm2 # 16-byte Reload movlps %xmm2, 128(%rsp) movss 16(%rsp), %xmm2 # 4-byte Reload # xmm2 = mem[0],zero,zero,zero movss %xmm2, 136(%rsp) movaps 272(%rsp), %xmm2 # 16-byte Reload movlps %xmm2, 112(%rsp) movss 20(%rsp), %xmm2 # 4-byte Reload # xmm2 = mem[0],zero,zero,zero movss %xmm2, 120(%rsp) movaps 288(%rsp), %xmm2 # 16-byte Reload movlps %xmm2, 96(%rsp) movss 24(%rsp), %xmm2 # 4-byte Reload # xmm2 = mem[0],zero,zero,zero movss %xmm2, 104(%rsp) movq %r14, 88(%rsp) movl %ebp, 36(%rsp) movss %xmm1, 32(%rsp) movss %xmm0, 28(%rsp) movb %bl, 11(%rsp) leaq 88(%rsp), %rax movq %rax, 160(%rsp) leaq 36(%rsp), %rax movq %rax, 168(%rsp) leaq 144(%rsp), %rax movq %rax, 176(%rsp) leaq 128(%rsp), %rax movq %rax, 184(%rsp) leaq 112(%rsp), %rax movq %rax, 192(%rsp) leaq 96(%rsp), %rax movq %rax, 200(%rsp) leaq 32(%rsp), %rax movq %rax, 208(%rsp) leaq 28(%rsp), %rax movq %rax, 216(%rsp) leaq 11(%rsp), %rax 
movq %rax, 224(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 160(%rsp), %r9 movl $_Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: addq $304, %rsp # imm = 0x130 .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size ward_brdf, .Lfunc_end1-ward_brdf .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type 
_Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb,@object # @_Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .section .rodata,"a",@progbits .globl _Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .p2align 3, 0x0 _Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb: .quad _Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .size _Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb" .size .L__unnamed_1, 57 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000c8787_00000000-6_ward.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2041: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2041: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z49__device_stub__Z11ward_kernelP6float3jS_S_S_S_ffbP6float3jRS_S1_S1_S1_ffb .type _Z49__device_stub__Z11ward_kernelP6float3jS_S_S_S_ffbP6float3jRS_S1_S1_S1_ffb, @function _Z49__device_stub__Z11ward_kernelP6float3jS_S_S_S_ffbP6float3jRS_S1_S1_S1_ffb: .LFB2063: .cfi_startproc endbr64 subq $184, %rsp .cfi_def_cfa_offset 192 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movss %xmm0, 16(%rsp) movss %xmm1, 12(%rsp) movl 192(%rsp), %eax movb %al, 8(%rsp) movq %fs:40, %rax movq %rax, 168(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) movq %rdx, 112(%rsp) movq %rcx, 120(%rsp) movq %r8, 128(%rsp) movq %r9, 136(%rsp) leaq 16(%rsp), %rax movq %rax, 144(%rsp) leaq 12(%rsp), %rax movq %rax, 152(%rsp) leaq 8(%rsp), %rax movq %rax, 160(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 168(%rsp), %rax subq %fs:40, %rax jne .L8 addq $184, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 200 pushq 40(%rsp) .cfi_def_cfa_offset 208 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z11ward_kernelP6float3jS_S_S_S_ffb(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 192 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2063: .size 
_Z49__device_stub__Z11ward_kernelP6float3jS_S_S_S_ffbP6float3jRS_S1_S1_S1_ffb, .-_Z49__device_stub__Z11ward_kernelP6float3jS_S_S_S_ffbP6float3jRS_S1_S1_S1_ffb .globl _Z11ward_kernelP6float3jS_S_S_S_ffb .type _Z11ward_kernelP6float3jS_S_S_S_ffb, @function _Z11ward_kernelP6float3jS_S_S_S_ffb: .LFB2064: .cfi_startproc endbr64 subq $72, %rsp .cfi_def_cfa_offset 80 movq %xmm0, 48(%rsp) movss %xmm1, 56(%rsp) movq %xmm2, 32(%rsp) movss %xmm3, 40(%rsp) movq %xmm4, 16(%rsp) movss %xmm5, 24(%rsp) movq %xmm6, (%rsp) movss %xmm7, 8(%rsp) leaq 32(%rsp), %rcx leaq 48(%rsp), %rax subq $8, %rsp .cfi_def_cfa_offset 88 movzbl %dl, %edx pushq %rdx .cfi_def_cfa_offset 96 movss 104(%rsp), %xmm1 movss 96(%rsp), %xmm0 leaq 16(%rsp), %r9 leaq 32(%rsp), %r8 movq %rax, %rdx call _Z49__device_stub__Z11ward_kernelP6float3jS_S_S_S_ffbP6float3jRS_S1_S1_S1_ffb addq $88, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2064: .size _Z11ward_kernelP6float3jS_S_S_S_ffb, .-_Z11ward_kernelP6float3jS_S_S_S_ffb .globl ward_brdf .type ward_brdf, @function ward_brdf: .LFB2038: .cfi_startproc endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $144, %rsp .cfi_def_cfa_offset 176 movq %rdi, %r12 movl %esi, %eax movl %edx, %ebx movl %ecx, %ebp movq %xmm0, 48(%rsp) movss %xmm1, 56(%rsp) movq %xmm2, 32(%rsp) movss %xmm3, 40(%rsp) movq %xmm4, 16(%rsp) movss %xmm5, 24(%rsp) movq %xmm6, (%rsp) movss %xmm7, 8(%rsp) movq %fs:40, %rdx movq %rdx, 136(%rsp) xorl %edx, %edx movl %ebx, %edx shrl $3, %edx movl %edx, 76(%rsp) movl $0, %edx divl %ebx shrl $3, %eax movl %eax, 80(%rsp) movl $8, 64(%rsp) movl $8, 68(%rsp) movl $0, %r9d movl $0, %r8d movq 64(%rsp), %rdx movl $1, %ecx movq 76(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L15 .L11: movq 136(%rsp), %rax subq %fs:40, %rax jne .L16 addq $144, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx 
.cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L15: .cfi_restore_state movq 48(%rsp), %rax movq %rax, 88(%rsp) movl 56(%rsp), %eax movl %eax, 96(%rsp) movq 32(%rsp), %rax movq %rax, 100(%rsp) movl 40(%rsp), %eax movl %eax, 108(%rsp) movq 16(%rsp), %rax movq %rax, 112(%rsp) movl 24(%rsp), %eax movl %eax, 120(%rsp) movq (%rsp), %rax movq %rax, 124(%rsp) movl 8(%rsp), %eax movl %eax, 132(%rsp) leaq 100(%rsp), %rcx leaq 88(%rsp), %rdx subq $8, %rsp .cfi_def_cfa_offset 184 movzbl %bpl, %ebp pushq %rbp .cfi_def_cfa_offset 192 movss 200(%rsp), %xmm1 movss 192(%rsp), %xmm0 leaq 140(%rsp), %r9 leaq 128(%rsp), %r8 movl %ebx, %esi movq %r12, %rdi call _Z49__device_stub__Z11ward_kernelP6float3jS_S_S_S_ffbP6float3jRS_S1_S1_S1_ffb addq $16, %rsp .cfi_def_cfa_offset 176 jmp .L11 .L16: call __stack_chk_fail@PLT .cfi_endproc .LFE2038: .size ward_brdf, .-ward_brdf .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z11ward_kernelP6float3jS_S_S_S_ffb" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2066: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z11ward_kernelP6float3jS_S_S_S_ffb(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2066: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" 
.align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "ward.hip" .globl _Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb # -- Begin function _Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .p2align 4, 0x90 .type _Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb,@function _Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb: # @_Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .cfi_startproc # %bb.0: subq $200, %rsp .cfi_def_cfa_offset 208 movlps %xmm0, 112(%rsp) movss %xmm1, 120(%rsp) movlps %xmm2, 96(%rsp) movss %xmm3, 104(%rsp) movlps %xmm4, 80(%rsp) movss %xmm5, 88(%rsp) movlps %xmm6, 64(%rsp) movss %xmm7, 72(%rsp) movq %rdi, 56(%rsp) movl %esi, 4(%rsp) movb %dl, 3(%rsp) leaq 56(%rsp), %rax movq %rax, 128(%rsp) leaq 4(%rsp), %rax movq %rax, 136(%rsp) leaq 112(%rsp), %rax movq %rax, 144(%rsp) leaq 96(%rsp), %rax movq %rax, 152(%rsp) leaq 80(%rsp), %rax movq %rax, 160(%rsp) leaq 64(%rsp), %rax movq %rax, 168(%rsp) leaq 208(%rsp), %rax movq %rax, 176(%rsp) leaq 216(%rsp), %rax movq %rax, 184(%rsp) leaq 3(%rsp), %rax movq %rax, 192(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 128(%rsp), %r9 movl $_Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $216, %rsp .cfi_adjust_cfa_offset -216 retq .Lfunc_end0: .size _Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb, .Lfunc_end0-_Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .cfi_endproc # -- End function .globl ward_brdf # -- Begin function ward_brdf .p2align 4, 0x90 .type ward_brdf,@function ward_brdf: # @ward_brdf .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 
subq $304, %rsp # imm = 0x130 .cfi_def_cfa_offset 336 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %rbp, -16 movl %ecx, %ebx movss %xmm7, 24(%rsp) # 4-byte Spill movaps %xmm6, 288(%rsp) # 16-byte Spill movss %xmm5, 20(%rsp) # 4-byte Spill movaps %xmm4, 272(%rsp) # 16-byte Spill movss %xmm3, 16(%rsp) # 4-byte Spill movaps %xmm2, 256(%rsp) # 16-byte Spill movss %xmm1, 12(%rsp) # 4-byte Spill movaps %xmm0, 240(%rsp) # 16-byte Spill movl %edx, %ebp movl %esi, %eax movq %rdi, %r14 xorl %edx, %edx divl %ebp # kill: def $eax killed $eax def $rax movl %ebp, %edi shrl $3, %edi shrl $3, %eax shlq $32, %rax orq %rax, %rdi movabsq $34359738376, %rdx # imm = 0x800000008 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movss 344(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero movss 336(%rsp), %xmm1 # xmm1 = mem[0],zero,zero,zero movaps 240(%rsp), %xmm2 # 16-byte Reload movlps %xmm2, 144(%rsp) movss 12(%rsp), %xmm2 # 4-byte Reload # xmm2 = mem[0],zero,zero,zero movss %xmm2, 152(%rsp) movaps 256(%rsp), %xmm2 # 16-byte Reload movlps %xmm2, 128(%rsp) movss 16(%rsp), %xmm2 # 4-byte Reload # xmm2 = mem[0],zero,zero,zero movss %xmm2, 136(%rsp) movaps 272(%rsp), %xmm2 # 16-byte Reload movlps %xmm2, 112(%rsp) movss 20(%rsp), %xmm2 # 4-byte Reload # xmm2 = mem[0],zero,zero,zero movss %xmm2, 120(%rsp) movaps 288(%rsp), %xmm2 # 16-byte Reload movlps %xmm2, 96(%rsp) movss 24(%rsp), %xmm2 # 4-byte Reload # xmm2 = mem[0],zero,zero,zero movss %xmm2, 104(%rsp) movq %r14, 88(%rsp) movl %ebp, 36(%rsp) movss %xmm1, 32(%rsp) movss %xmm0, 28(%rsp) movb %bl, 11(%rsp) leaq 88(%rsp), %rax movq %rax, 160(%rsp) leaq 36(%rsp), %rax movq %rax, 168(%rsp) leaq 144(%rsp), %rax movq %rax, 176(%rsp) leaq 128(%rsp), %rax movq %rax, 184(%rsp) leaq 112(%rsp), %rax movq %rax, 192(%rsp) leaq 96(%rsp), %rax movq %rax, 200(%rsp) leaq 32(%rsp), %rax movq %rax, 208(%rsp) leaq 28(%rsp), %rax movq %rax, 216(%rsp) leaq 11(%rsp), %rax 
movq %rax, 224(%rsp) leaq 72(%rsp), %rdi leaq 56(%rsp), %rsi leaq 48(%rsp), %rdx leaq 40(%rsp), %rcx callq __hipPopCallConfiguration movq 72(%rsp), %rsi movl 80(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 160(%rsp), %r9 movl $_Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb, %edi pushq 40(%rsp) .cfi_adjust_cfa_offset 8 pushq 56(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: addq $304, %rsp # imm = 0x130 .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size ward_brdf, .Lfunc_end1-ward_brdf .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type 
_Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb,@object # @_Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .section .rodata,"a",@progbits .globl _Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .p2align 3, 0x0 _Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb: .quad _Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .size _Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb" .size .L__unnamed_1, 57 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z26__device_stub__ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z11ward_kernelP15HIP_vector_typeIfLj3EEjS0_S0_S0_S0_ffb .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <cstdio>
#include <cstdlib>
#include <sys/time.h>
#include <cassert>

// Diagnostic kernel: every thread prints its full coordinate tuple
// (threadIdx, blockIdx) together with the launch shape (blockDim, gridDim).
// Device-side printf serializes output, so this is for demonstration only.
__global__ void threads2d()
{
    printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) blockDim:(%d, %d, %d) "
           "gridDim:(%d, %d, %d)\n",
           threadIdx.x, threadIdx.y, threadIdx.z,
           blockIdx.x, blockIdx.y, blockIdx.z,
           blockDim.x, blockDim.y, blockDim.z,
           gridDim.x,gridDim.y,gridDim.z);
}

// Launches threads2d over a 2-D grid of 3x3 blocks sized to cover 36 elements,
// printing the chosen grid/block shapes first, then synchronizing so the
// device-side printf output is flushed before exit.
int main (int argc, char** argv)
{
    int nElem = 36;
    dim3 block(3,3);
    // Ceiling division so the grid covers all nElem elements per dimension.
    // NOTE(review): the second (y) dimension also divides by block.x rather
    // than block.y — harmless here because the block is square (3x3), but
    // presumably block.y was intended; confirm before reusing with a
    // non-square block.
    dim3 grid((nElem + block.x - 1) / block.x, (nElem + block.x - 1) / block.x);

    fprintf(stdout,"grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
    fprintf(stdout,"block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);

    threads2d<<<grid, block>>>();
    // Blocks until the kernel finishes (flushes device printf).
    // NOTE(review): the cudaError_t return value is discarded, so launch or
    // execution failures go unreported.
    cudaDeviceSynchronize();
    return 0;
}
code for sm_80 Function : _Z9threads2dv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fc800078e00ff */ /*0010*/ S2R R11, SR_CTAID.X ; /* 0x00000000000b7919 */ /* 0x000e220000002500 */ /*0020*/ IADD3 R1, R1, -0x30, RZ ; /* 0xffffffd001017810 */ /* 0x000fe20007ffe0ff */ /*0030*/ IMAD.MOV.U32 R14, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0e7624 */ /* 0x000fe200078e00ff */ /*0040*/ MOV R18, c[0x0][0x10] ; /* 0x0000040000127a02 */ /* 0x000fe20000000f00 */ /*0050*/ S2R R10, SR_TID.Z ; /* 0x00000000000a7919 */ /* 0x000e220000002300 */ /*0060*/ IMAD.MOV.U32 R15, RZ, RZ, c[0x0][0x4] ; /* 0x00000100ff0f7624 */ /* 0x000fe200078e00ff */ /*0070*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe20000000f00 */ /*0080*/ IMAD.MOV.U32 R16, RZ, RZ, c[0x0][0x8] ; /* 0x00000200ff107624 */ /* 0x000fe200078e00ff */ /*0090*/ S2R R9, SR_TID.Y ; /* 0x0000000000097919 */ /* 0x000e220000002200 */ /*00a0*/ IMAD.MOV.U32 R17, RZ, RZ, c[0x0][0xc] ; /* 0x00000300ff117624 */ /* 0x000fe200078e00ff */ /*00b0*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */ /* 0x0002a20000000a00 */ /*00c0*/ IMAD.MOV.U32 R19, RZ, RZ, c[0x0][0x14] ; /* 0x00000500ff137624 */ /* 0x000fe200078e00ff */ /*00d0*/ S2R R8, SR_TID.X ; /* 0x0000000000087919 */ /* 0x000e220000002100 */ /*00e0*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */ /* 0x000fe20007f1e0ff */ /*00f0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */ /* 0x000fe200078e00ff */ /*0100*/ MOV R5, c[0x4][0xc] ; /* 0x0100030000057a02 */ /* 0x000fe20000000f00 */ /*0110*/ S2R R13, SR_CTAID.Z ; /* 0x00000000000d7919 */ /* 0x000ee40000002700 */ /*0120*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */ /* 0x000fc400000e06ff */ /*0130*/ S2R R12, SR_CTAID.Y ; /* 0x00000000000c7919 */ /* 0x000ee80000002600 */ /*0140*/ STL.128 [R1+0x20], R16 ; /* 0x0000201001007387 */ /* 0x0003e80000100c00 
*/ /*0150*/ STL.128 [R1], R8 ; /* 0x0000000801007387 */ /* 0x0013e80000100c00 */ /*0160*/ STL.128 [R1+0x10], R12 ; /* 0x0000100c01007387 */ /* 0x0083e40000100c00 */ /*0170*/ LEPC R8 ; /* 0x000000000008734e */ /* 0x006fe40000000000 */ /*0180*/ MOV R11, 0x1f0 ; /* 0x000001f0000b7802 */ /* 0x000fe40000000f00 */ /*0190*/ MOV R20, 0x170 ; /* 0x0000017000147802 */ /* 0x000fc40000000f00 */ /*01a0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*01b0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*01c0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */ /* 0x000fc8000791e108 */ /*01d0*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e2509 */ /*01e0*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */ /* 0x000fea0003c00000 */ /*01f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0200*/ BRA 0x200; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0280*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0290*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <cstdio> #include <cstdlib> #include <sys/time.h> #include <cassert> __global__ void threads2d() { printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) blockDim:(%d, %d, %d) " "gridDim:(%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x,gridDim.y,gridDim.z); } int main (int argc, char** argv) { int nElem = 36; dim3 block(3,3); dim3 grid((nElem + block.x - 1) / block.x, (nElem + block.x - 1) / block.x); fprintf(stdout,"grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z); fprintf(stdout,"block.x %d block.y %d block.z %d\n", block.x, block.y, block.z); threads2d<<<grid, block>>>(); cudaDeviceSynchronize(); return 0; }
.file "tmpxft_001336ea_00000000-6_threads-2d.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z27__device_stub__Z9threads2dvv .type _Z27__device_stub__Z9threads2dvv, @function _Z27__device_stub__Z9threads2dvv: .LFB2082: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z9threads2dv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z27__device_stub__Z9threads2dvv, .-_Z27__device_stub__Z9threads2dvv .globl _Z9threads2dv .type _Z9threads2dv, @function _Z9threads2dv: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z27__device_stub__Z9threads2dvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z9threads2dv, .-_Z9threads2dv .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "grid.x %d grid.y %d grid.z %d\n" .align 8 .LC1: .string "block.x %d block.y %d block.z %d\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 subq 
$40, %rsp .cfi_def_cfa_offset 48 movl $1, %r9d movl $12, %r8d movl $12, %ecx leaq .LC0(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %r9d movl $3, %r8d movl $3, %ecx leaq .LC1(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $12, 20(%rsp) movl $12, 24(%rsp) movl $3, 8(%rsp) movl $3, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 8(%rsp), %rdx movl $1, %ecx movq 20(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L12: call cudaDeviceSynchronize@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state call _Z27__device_stub__Z9threads2dvv jmp .L12 .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string "_Z9threads2dv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z9threads2dv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm 
_ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <cstdio> #include <cstdlib> #include <sys/time.h> #include <cassert> __global__ void threads2d() { printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) blockDim:(%d, %d, %d) " "gridDim:(%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x,gridDim.y,gridDim.z); } int main (int argc, char** argv) { int nElem = 36; dim3 block(3,3); dim3 grid((nElem + block.x - 1) / block.x, (nElem + block.x - 1) / block.x); fprintf(stdout,"grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z); fprintf(stdout,"block.x %d block.y %d block.z %d\n", block.x, block.y, block.z); threads2d<<<grid, block>>>(); cudaDeviceSynchronize(); return 0; }
#include <hip/hip_runtime.h> #include <cstdio> #include <cstdlib> #include <sys/time.h> #include <cassert> __global__ void threads2d() { printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) blockDim:(%d, %d, %d) " "gridDim:(%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x,gridDim.y,gridDim.z); } int main (int argc, char** argv) { int nElem = 36; dim3 block(3,3); dim3 grid((nElem + block.x - 1) / block.x, (nElem + block.x - 1) / block.x); fprintf(stdout,"grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z); fprintf(stdout,"block.x %d block.y %d block.z %d\n", block.x, block.y, block.z); threads2d<<<grid, block>>>(); hipDeviceSynchronize(); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <cstdio> #include <cstdlib> #include <sys/time.h> #include <cassert> __global__ void threads2d() { printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) blockDim:(%d, %d, %d) " "gridDim:(%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x,gridDim.y,gridDim.z); } int main (int argc, char** argv) { int nElem = 36; dim3 block(3,3); dim3 grid((nElem + block.x - 1) / block.x, (nElem + block.x - 1) / block.x); fprintf(stdout,"grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z); fprintf(stdout,"block.x %d block.y %d block.z %d\n", block.x, block.y, block.z); threads2d<<<grid, block>>>(); hipDeviceSynchronize(); return 0; }
.text .file "threads-2d.hip" .globl _Z24__device_stub__threads2dv # -- Begin function _Z24__device_stub__threads2dv .p2align 4, 0x90 .type _Z24__device_stub__threads2dv,@function _Z24__device_stub__threads2dv: # @_Z24__device_stub__threads2dv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z9threads2dv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z24__device_stub__threads2dv, .Lfunc_end0-_Z24__device_stub__threads2dv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 movq stdout(%rip), %rdi movl $.L.str, %esi movl $12, %edx movl $12, %ecx movl $1, %r8d xorl %eax, %eax callq fprintf movq stdout(%rip), %rdi movl $.L.str.1, %esi movl $3, %edx movl $3, %ecx movl $1, %r8d xorl %eax, %eax callq fprintf movabsq $51539607564, %rdi # imm = 0xC0000000C movabsq $12884901891, %rdx # imm = 0x300000003 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z9threads2dv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceSynchronize xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function 
__hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9threads2dv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z9threads2dv,@object # @_Z9threads2dv .section .rodata,"a",@progbits .globl _Z9threads2dv .p2align 3, 0x0 _Z9threads2dv: .quad _Z24__device_stub__threads2dv .size _Z9threads2dv, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "grid.x %d grid.y %d grid.z %d\n" .size .L.str, 31 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "block.x %d block.y %d block.z %d\n" .size .L.str.1, 34 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z9threads2dv" .size .L__unnamed_1, 14 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type 
__hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__threads2dv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9threads2dv .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001336ea_00000000-6_threads-2d.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z27__device_stub__Z9threads2dvv .type _Z27__device_stub__Z9threads2dvv, @function _Z27__device_stub__Z9threads2dvv: .LFB2082: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z9threads2dv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z27__device_stub__Z9threads2dvv, .-_Z27__device_stub__Z9threads2dvv .globl _Z9threads2dv .type _Z9threads2dv, @function _Z9threads2dv: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z27__device_stub__Z9threads2dvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z9threads2dv, .-_Z9threads2dv .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "grid.x %d grid.y %d grid.z %d\n" .align 8 .LC1: .string "block.x %d block.y %d block.z %d\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 subq 
$40, %rsp .cfi_def_cfa_offset 48 movl $1, %r9d movl $12, %r8d movl $12, %ecx leaq .LC0(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $1, %r9d movl $3, %r8d movl $3, %ecx leaq .LC1(%rip), %rdx movl $2, %esi movq stdout(%rip), %rdi movl $0, %eax call __fprintf_chk@PLT movl $12, 20(%rsp) movl $12, 24(%rsp) movl $3, 8(%rsp) movl $3, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 8(%rsp), %rdx movl $1, %ecx movq 20(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L12: call cudaDeviceSynchronize@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state call _Z27__device_stub__Z9threads2dvv jmp .L12 .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1,"aMS",@progbits,1 .LC2: .string "_Z9threads2dv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC2(%rip), %rdx movq %rdx, %rcx leaq _Z9threads2dv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm 
_ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "threads-2d.hip" .globl _Z24__device_stub__threads2dv # -- Begin function _Z24__device_stub__threads2dv .p2align 4, 0x90 .type _Z24__device_stub__threads2dv,@function _Z24__device_stub__threads2dv: # @_Z24__device_stub__threads2dv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z9threads2dv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z24__device_stub__threads2dv, .Lfunc_end0-_Z24__device_stub__threads2dv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 movq stdout(%rip), %rdi movl $.L.str, %esi movl $12, %edx movl $12, %ecx movl $1, %r8d xorl %eax, %eax callq fprintf movq stdout(%rip), %rdi movl $.L.str.1, %esi movl $3, %edx movl $3, %ecx movl $1, %r8d xorl %eax, %eax callq fprintf movabsq $51539607564, %rdi # imm = 0xC0000000C movabsq $12884901891, %rdx # imm = 0x300000003 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z9threads2dv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceSynchronize xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function 
__hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9threads2dv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z9threads2dv,@object # @_Z9threads2dv .section .rodata,"a",@progbits .globl _Z9threads2dv .p2align 3, 0x0 _Z9threads2dv: .quad _Z24__device_stub__threads2dv .size _Z9threads2dv, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "grid.x %d grid.y %d grid.z %d\n" .size .L.str, 31 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "block.x %d block.y %d block.z %d\n" .size .L.str.1, 34 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z9threads2dv" .size .L__unnamed_1, 14 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type 
__hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__threads2dv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9threads2dv .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
// Multiplicação de matrizes em CUDA // Disciplina: OPRP001 - Programação Paralela // Prof.: Mauricio Pillon // Aluno: Renato Tanaka #include <cuda.h> #include <stdio.h> #include <math.h> // Matriz Quadrada (nro_linhas = nro_colunas) #define N 4 // Número de linhas // Número de colunas // GPU: Multiplicação das matrizes (a) e (b), resultado em (c) __global__ void matMult (int *da, int *db, int *dc) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; dc[i*N+j] = 0; for(int k=0; k<N; k++) dc[i*N+j] += da[i*N+k] * db[k*N+j]; } // GPU: Imprime índices na matriz __global__ void printIndex (void) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; printf ("[%d][%d]=%d\t(x)\t%d\t%d\t%d\t(y)\t%d\t%d\t%d\n",i,j,(i*N+j), threadIdx.x, blockIdx.x, blockDim.x,threadIdx.y, blockIdx.y, blockDim.y); } // GPU: Inicializa os vetores (a), (b) e (c) na Memória Global __global__ void dirtyMem (int *da, int *db, int *dc) { int i = blockIdx.x * blockDim.x + threadIdx.x; da[i] = -1; db[i] = -2; dc[i] = -3; } // CPU: Inicializa os vetores (a) e (b) __host__ void initvet(int *host_a, int *host_b) { for (int i=0; i < N; i++) { for (int j=0; j < N; j++) { host_b[i*N+j] = (i+j)+((N-1)*i); host_a[i*N+j] = (N*N)-host_b[i*N+j]; } } } // CPU: Imprime matriz __host__ void printMat (int *mat){ for (int j =0; j < N; j++) printf("\t(%d)", j); printf("\n"); for (int i=0; i < N; i++) { printf("(%d)", i); for (int j=0; j < N; j++){ printf("\t%d", mat[i*N+j]); } printf("\n"); } } // CPU: função principal int main(int argc, char const *argv[]) { int *a, *b, *c; int *dev_a, *dev_b, *dev_c; int size; // Alocação de matriz quadrada size = N * N * sizeof(int); // Alocação de memória no host cudaMallocHost((void **) &a, size); cudaMallocHost((void **) &b, size); cudaMallocHost((void **) &c, size); // Alocação de memória na GPU para os vetores (a,b e c) cudaMalloc ((void **) &dev_a, size); cudaMalloc ((void **) &dev_b, size); 
cudaMalloc ((void **) &dev_c, size); // Atribui valores iniciais aos vetores em GPU dirtyMem<<<N, N>>>(dev_a, dev_b, dev_c); // Cópia GPU para CPU cudaMemcpy (a, dev_a, size, cudaMemcpyDeviceToHost); cudaMemcpy (b, dev_b, size, cudaMemcpyDeviceToHost); cudaMemcpy (c, dev_c, size, cudaMemcpyDeviceToHost); // Impressão na tela dos valores dos vetores printf ("\t ### Valores Inicializados na GPU ###\n"); printf ("\t ### Matriz (a) ### \n"); printMat(a); printf ("\t ### Matriz (b) ### \n"); printMat(b); printf ("\t ### Matriz (c) ### \n"); printMat(c); // Inicialização dos vetores (a) e (b) no host initvet(a,b); // Cópia dos vetores gerados em CPU p/ memória da GPU cudaMemcpy (dev_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy (dev_b, b, size, cudaMemcpyHostToDevice); // Número de blocos e threads p/ dimensões (x,y) dim3 dimBlock (1, 1); dim3 dimThreads(N, N); // Imprime as posições acessadas pelo dimBlock e dimThreads printIndex<<< dimBlock, dimThreads>>>(); // Execução do kernel matMult em GPU matMult<<< dimBlock, dimThreads>>>(dev_a, dev_b, dev_c); cudaDeviceSynchronize(); // Cópia do vetor (c) da GPU (Memória Global) para CPU cudaMemcpy (c, dev_c, size, cudaMemcpyDeviceToHost); // Impressão na tela dos valores dos vetores printf ("\t ### Valores após processamento em GPU ###\n"); printf ("\t ### Matriz (a) ### \n"); printMat(a); printf ("\t ### Matriz (b) ### \n"); printMat(b); printf ("\t ### Matriz (c) ### \n"); printMat(c); // Libera a Memória Global (GPU) cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); // Libera a Memória Global (CPU) cudaFreeHost(a); cudaFreeHost(b); cudaFreeHost(c); return 0; }
code for sm_80 Function : _Z8dirtyMemPiS_S_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e220000002500 */ /*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0030*/ MOV R9, 0xffffffff ; /* 0xffffffff00097802 */ /* 0x000fe20000000f00 */ /*0040*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe20000000a00 */ /*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e220000002100 */ /*0060*/ MOV R11, 0xfffffffe ; /* 0xfffffffe000b7802 */ /* 0x000fe40000000f00 */ /*0070*/ MOV R13, 0xfffffffd ; /* 0xfffffffd000d7802 */ /* 0x000fe20000000f00 */ /*0080*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */ /* 0x001fc800078e0203 */ /*0090*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */ /* 0x000fc800078e0207 */ /*00a0*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */ /* 0x0c0fe200078e0207 */ /*00b0*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */ /* 0x000fe6000c101904 */ /*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */ /* 0x000fe200078e0207 */ /*00d0*/ STG.E [R4.64], R11 ; /* 0x0000000b04007986 */ /* 0x000fe8000c101904 */ /*00e0*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */ /* 0x000fe2000c101904 */ /*00f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0100*/ BRA 0x100; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 
0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z10printIndexv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fc800078e00ff */ /*0010*/ S2R R12, SR_CTAID.X ; /* 0x00000000000c7919 */ /* 0x000e220000002500 */ /*0020*/ IADD3 R1, R1, -0x28, RZ ; /* 0xffffffd801017810 */ /* 0x000fe20007ffe0ff */ /*0030*/ IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0d7624 */ /* 0x000fe200078e00ff */ /*0040*/ MOV R0, 0x8 ; /* 0x0000000800007802 */ /* 0x000fe20000000f00 */ /*0050*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */ /* 0x000e620000002100 */ /*0060*/ IMAD.MOV.U32 R16, RZ, RZ, c[0x0][0x4] ; /* 0x00000100ff107624 */ /* 0x000fe200078e00ff */ /*0070*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */ /* 0x000fe20007f1e0ff */ /*0080*/ LDC.64 R10, c[0x4][R0] ; /* 0x01000000000a7b82 */ /* 0x0004e20000000a00 */ /*0090*/ S2R R14, SR_TID.Y ; /* 0x00000000000e7919 */ /* 0x000f220000002200 */ /*00a0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x0] ; /* 0x01000000ff047624 */ /* 0x000fe400078e00ff */ /*00b0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x4] ; /* 0x01000100ff057624 */ /* 0x000fe200078e00ff */ /*00c0*/ S2R R15, SR_CTAID.Y ; /* 0x00000000000f7919 */ /* 0x000f220000002600 */ /*00d0*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */ /* 0x000fc600000e06ff */ /*00e0*/ STL [R1+0x20], R16 ; /* 
0x0000201001007387 */ /* 0x0005e80000100800 */ /*00f0*/ STL.64 [R1+0x10], R12 ; /* 0x0000100c01007387 */ /* 0x0015e20000100a00 */ /*0100*/ IMAD R2, R12, c[0x0][0x0], R9 ; /* 0x000000000c027a24 */ /* 0x002fe400078e0209 */ /*0110*/ IMAD R3, R15, c[0x0][0x4], R14 ; /* 0x000001000f037a24 */ /* 0x010fe200078e020e */ /*0120*/ STL.64 [R1+0x18], R14 ; /* 0x0000180e01007387 */ /* 0x0005e60000100a00 */ /*0130*/ IMAD R8, R2, 0x4, R3 ; /* 0x0000000402087824 */ /* 0x000fe200078e0203 */ /*0140*/ STL.64 [R1], R2 ; /* 0x0000000201007387 */ /* 0x0005e80000100a00 */ /*0150*/ STL.64 [R1+0x8], R8 ; /* 0x0000080801007387 */ /* 0x0005e40000100a00 */ /*0160*/ LEPC R2 ; /* 0x000000000002734e */ /* 0x00cfe40000000000 */ /*0170*/ MOV R9, 0x1e0 ; /* 0x000001e000097802 */ /* 0x000fe40000000f00 */ /*0180*/ MOV R20, 0x160 ; /* 0x0000016000147802 */ /* 0x000fc40000000f00 */ /*0190*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*01a0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*01b0*/ IADD3 R20, P0, P1, -R20, R9, R2 ; /* 0x0000000914147210 */ /* 0x000fc8000791e102 */ /*01c0*/ IADD3.X R21, ~R0, R21, R3, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e2503 */ /*01d0*/ CALL.ABS.NOINC R10 ; /* 0x000000000a007343 */ /* 0x000fea0003c00000 */ /*01e0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*01f0*/ BRA 0x1f0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0200*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z7matMultPiS_S_ .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e220000002500 */ /*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e280000002100 */ /*0050*/ S2R R6, SR_CTAID.Y ; /* 0x0000000000067919 */ /* 0x000e680000002600 */ /*0060*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */ /* 0x000e620000002200 */ /*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fc400078e0203 */ /*0080*/ IMAD R6, R6, c[0x0][0x4], R5 ; /* 0x0000010006067a24 */ /* 0x002fc600078e0205 */ /*0090*/ SHF.L.U32 R5, R0, 0x2, RZ ; /* 0x0000000200057819 */ /* 0x000fc800000006ff */ /*00a0*/ IADD3 R2, R6, R5, RZ ; /* 0x0000000506027210 */ /* 0x000fe20007ffe0ff */ /*00b0*/ IMAD.WIDE R4, R5, R7, c[0x0][0x160] ; /* 0x0000580005047625 */ /* 0x000fc800078e0207 */ /*00c0*/ IMAD.WIDE R2, R2, R7, c[0x0][0x170] ; /* 0x00005c0002027625 */ /* 0x000fc800078e0207 */ /*00d0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x168] ; /* 0x00005a0006067625 */ /* 0x000fe200078e0207 */ /*00e0*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */ /* 0x000fe8000c101904 */ /*00f0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */ /* 0x000ea8000c1e1900 */ /*0100*/ LDG.E R9, [R6.64] ; /* 0x0000000406097981 */ /* 0x000ea4000c1e1900 */ /*0110*/ IMAD R9, R0, R9, RZ ; /* 0x0000000900097224 */ /* 0x004fca00078e02ff */ /*0120*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */ /* 0x0001e8000c101904 */ /*0130*/ LDG.E R0, [R4.64+0x4] ; /* 0x0000040404007981 */ /* 0x000ea8000c1e1900 */ /*0140*/ LDG.E R8, [R6.64+0x10] ; /* 0x0000100406087981 */ /* 0x000ea4000c1e1900 */ /*0150*/ IMAD R11, R0, R8, R9 ; /* 0x00000008000b7224 */ /* 
0x004fca00078e0209 */ /*0160*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */ /* 0x000fe8000c101904 */ /*0170*/ LDG.E R0, [R4.64+0x8] ; /* 0x0000080404007981 */ /* 0x000ea8000c1e1900 */ /*0180*/ LDG.E R8, [R6.64+0x20] ; /* 0x0000200406087981 */ /* 0x000ea4000c1e1900 */ /*0190*/ IMAD R13, R0, R8, R11 ; /* 0x00000008000d7224 */ /* 0x004fca00078e020b */ /*01a0*/ STG.E [R2.64], R13 ; /* 0x0000000d02007986 */ /* 0x000fe8000c101904 */ /*01b0*/ LDG.E R0, [R4.64+0xc] ; /* 0x00000c0404007981 */ /* 0x000e28000c1e1900 */ /*01c0*/ LDG.E R8, [R6.64+0x30] ; /* 0x0000300406087981 */ /* 0x000e24000c1e1900 */ /*01d0*/ IMAD R9, R0, R8, R13 ; /* 0x0000000800097224 */ /* 0x001fca00078e020d */ /*01e0*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */ /* 0x000fe2000c101904 */ /*01f0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0200*/ BRA 0x200; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0210*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0220*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0230*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0240*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0250*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0260*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0270*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0280*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0290*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*02f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
// Multiplicação de matrizes em CUDA // Disciplina: OPRP001 - Programação Paralela // Prof.: Mauricio Pillon // Aluno: Renato Tanaka #include <cuda.h> #include <stdio.h> #include <math.h> // Matriz Quadrada (nro_linhas = nro_colunas) #define N 4 // Número de linhas // Número de colunas // GPU: Multiplicação das matrizes (a) e (b), resultado em (c) __global__ void matMult (int *da, int *db, int *dc) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; dc[i*N+j] = 0; for(int k=0; k<N; k++) dc[i*N+j] += da[i*N+k] * db[k*N+j]; } // GPU: Imprime índices na matriz __global__ void printIndex (void) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; printf ("[%d][%d]=%d\t(x)\t%d\t%d\t%d\t(y)\t%d\t%d\t%d\n",i,j,(i*N+j), threadIdx.x, blockIdx.x, blockDim.x,threadIdx.y, blockIdx.y, blockDim.y); } // GPU: Inicializa os vetores (a), (b) e (c) na Memória Global __global__ void dirtyMem (int *da, int *db, int *dc) { int i = blockIdx.x * blockDim.x + threadIdx.x; da[i] = -1; db[i] = -2; dc[i] = -3; } // CPU: Inicializa os vetores (a) e (b) __host__ void initvet(int *host_a, int *host_b) { for (int i=0; i < N; i++) { for (int j=0; j < N; j++) { host_b[i*N+j] = (i+j)+((N-1)*i); host_a[i*N+j] = (N*N)-host_b[i*N+j]; } } } // CPU: Imprime matriz __host__ void printMat (int *mat){ for (int j =0; j < N; j++) printf("\t(%d)", j); printf("\n"); for (int i=0; i < N; i++) { printf("(%d)", i); for (int j=0; j < N; j++){ printf("\t%d", mat[i*N+j]); } printf("\n"); } } // CPU: função principal int main(int argc, char const *argv[]) { int *a, *b, *c; int *dev_a, *dev_b, *dev_c; int size; // Alocação de matriz quadrada size = N * N * sizeof(int); // Alocação de memória no host cudaMallocHost((void **) &a, size); cudaMallocHost((void **) &b, size); cudaMallocHost((void **) &c, size); // Alocação de memória na GPU para os vetores (a,b e c) cudaMalloc ((void **) &dev_a, size); cudaMalloc ((void **) &dev_b, size); 
cudaMalloc ((void **) &dev_c, size); // Atribui valores iniciais aos vetores em GPU dirtyMem<<<N, N>>>(dev_a, dev_b, dev_c); // Cópia GPU para CPU cudaMemcpy (a, dev_a, size, cudaMemcpyDeviceToHost); cudaMemcpy (b, dev_b, size, cudaMemcpyDeviceToHost); cudaMemcpy (c, dev_c, size, cudaMemcpyDeviceToHost); // Impressão na tela dos valores dos vetores printf ("\t ### Valores Inicializados na GPU ###\n"); printf ("\t ### Matriz (a) ### \n"); printMat(a); printf ("\t ### Matriz (b) ### \n"); printMat(b); printf ("\t ### Matriz (c) ### \n"); printMat(c); // Inicialização dos vetores (a) e (b) no host initvet(a,b); // Cópia dos vetores gerados em CPU p/ memória da GPU cudaMemcpy (dev_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy (dev_b, b, size, cudaMemcpyHostToDevice); // Número de blocos e threads p/ dimensões (x,y) dim3 dimBlock (1, 1); dim3 dimThreads(N, N); // Imprime as posições acessadas pelo dimBlock e dimThreads printIndex<<< dimBlock, dimThreads>>>(); // Execução do kernel matMult em GPU matMult<<< dimBlock, dimThreads>>>(dev_a, dev_b, dev_c); cudaDeviceSynchronize(); // Cópia do vetor (c) da GPU (Memória Global) para CPU cudaMemcpy (c, dev_c, size, cudaMemcpyDeviceToHost); // Impressão na tela dos valores dos vetores printf ("\t ### Valores após processamento em GPU ###\n"); printf ("\t ### Matriz (a) ### \n"); printMat(a); printf ("\t ### Matriz (b) ### \n"); printMat(b); printf ("\t ### Matriz (c) ### \n"); printMat(c); // Libera a Memória Global (GPU) cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); // Libera a Memória Global (CPU) cudaFreeHost(a); cudaFreeHost(b); cudaFreeHost(c); return 0; }
.file "tmpxft_001a3795_00000000-6_cuda.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z7initvetPiS_ .type _Z7initvetPiS_, @function _Z7initvetPiS_: .LFB2057: .cfi_startproc endbr64 movl $4, %ecx movl $0, %r10d movl $16, %r9d .L4: leal -4(%rcx), %eax movq %r10, %rdx salq $4, %rdx .L5: movl %eax, (%rsi,%rdx) movl %r9d, %r8d subl %eax, %r8d movl %r8d, (%rdi,%rdx) addl $1, %eax addq $4, %rdx cmpl %ecx, %eax jne .L5 addq $1, %r10 addl $4, %ecx cmpl $20, %ecx jne .L4 ret .cfi_endproc .LFE2057: .size _Z7initvetPiS_, .-_Z7initvetPiS_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "\t(%d)" .LC1: .string "\n" .LC2: .string "(%d)" .LC3: .string "\t%d" .text .globl _Z8printMatPi .type _Z8printMatPi, @function _Z8printMatPi: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $8, %rsp .cfi_def_cfa_offset 64 movq %rdi, %rbp movl $0, %ebx leaq .LC0(%rip), %r12 .L9: movl %ebx, %edx movq %r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $1, %ebx cmpl $4, %ebx jne .L9 leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $16, %rbp movl $0, %r13d leaq .LC2(%rip), %r15 leaq .LC3(%rip), %r12 leaq .LC1(%rip), %r14 .L11: movl %r13d, %edx movq %r15, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq -16(%rbp), %rbx .L10: movl (%rbx), %edx movq %r12, %rsi movl $2, %edi movl 
$0, %eax call __printf_chk@PLT addq $4, %rbx cmpq %rbp, %rbx jne .L10 movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $1, %r13d addq $16, %rbp cmpl $4, %r13d jne .L11 addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size _Z8printMatPi, .-_Z8printMatPi .globl _Z30__device_stub__Z7matMultPiS_S_PiS_S_ .type _Z30__device_stub__Z7matMultPiS_S_PiS_S_, @function _Z30__device_stub__Z7matMultPiS_S_PiS_S_: .LFB2084: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L20 .L16: movq 120(%rsp), %rax subq %fs:40, %rax jne .L21 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L20: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z7matMultPiS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L16 .L21: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _Z30__device_stub__Z7matMultPiS_S_PiS_S_, .-_Z30__device_stub__Z7matMultPiS_S_PiS_S_ .globl _Z7matMultPiS_S_ .type _Z7matMultPiS_S_, @function _Z7matMultPiS_S_: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z30__device_stub__Z7matMultPiS_S_PiS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret 
.cfi_endproc .LFE2085: .size _Z7matMultPiS_S_, .-_Z7matMultPiS_S_ .globl _Z29__device_stub__Z10printIndexvv .type _Z29__device_stub__Z10printIndexvv, @function _Z29__device_stub__Z10printIndexvv: .LFB2086: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L28 .L24: movq 72(%rsp), %rax subq %fs:40, %rax jne .L29 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L28: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z10printIndexv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L24 .L29: call __stack_chk_fail@PLT .cfi_endproc .LFE2086: .size _Z29__device_stub__Z10printIndexvv, .-_Z29__device_stub__Z10printIndexvv .globl _Z10printIndexv .type _Z10printIndexv, @function _Z10printIndexv: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z10printIndexvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _Z10printIndexv, .-_Z10printIndexv .globl _Z31__device_stub__Z8dirtyMemPiS_S_PiS_S_ .type _Z31__device_stub__Z8dirtyMemPiS_S_PiS_S_, @function _Z31__device_stub__Z8dirtyMemPiS_S_PiS_S_: .LFB2088: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 
32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L36 .L32: movq 120(%rsp), %rax subq %fs:40, %rax jne .L37 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L36: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z8dirtyMemPiS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L32 .L37: call __stack_chk_fail@PLT .cfi_endproc .LFE2088: .size _Z31__device_stub__Z8dirtyMemPiS_S_PiS_S_, .-_Z31__device_stub__Z8dirtyMemPiS_S_PiS_S_ .globl _Z8dirtyMemPiS_S_ .type _Z8dirtyMemPiS_S_, @function _Z8dirtyMemPiS_S_: .LFB2089: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z8dirtyMemPiS_S_PiS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2089: .size _Z8dirtyMemPiS_S_, .-_Z8dirtyMemPiS_S_ .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC4: .string "\t ### Valores Inicializados na GPU ###\n" .section .rodata.str1.1 .LC5: .string "\t ### Matriz (a) ### \n" .LC6: .string "\t ### Matriz (b) ### \n" .LC7: .string "\t ### Matriz (c) ### \n" .section .rodata.str1.8 .align 8 .LC8: .string "\t ### Valores ap\303\263s processamento em GPU ###\n" .text .globl main .type main, @function main: .LFB2059: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movq %rsp, %rdi movl $64, %esi call cudaMallocHost@PLT leaq 8(%rsp), %rdi movl $64, %esi call cudaMallocHost@PLT leaq 16(%rsp), %rdi movl $64, %esi call cudaMallocHost@PLT leaq 24(%rsp), %rdi movl $64, %esi call cudaMalloc@PLT leaq 32(%rsp), %rdi movl $64, %esi call cudaMalloc@PLT leaq 40(%rsp), %rdi movl $64, %esi call cudaMalloc@PLT movl $4, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $4, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $0, %r9d movl $0, %r8d 
movq 60(%rsp), %rdx movl $1, %ecx movq 48(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L46 .L41: movl $2, %ecx movl $64, %edx movq 24(%rsp), %rsi movq (%rsp), %rdi call cudaMemcpy@PLT movl $2, %ecx movl $64, %edx movq 32(%rsp), %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $2, %ecx movl $64, %edx movq 40(%rsp), %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq (%rsp), %rdi call _Z8printMatPi leaq .LC6(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 8(%rsp), %rdi call _Z8printMatPi leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 16(%rsp), %rdi call _Z8printMatPi movq 8(%rsp), %rsi movq (%rsp), %rdi call _Z7initvetPiS_ movl $1, %ecx movl $64, %edx movq (%rsp), %rsi movq 24(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $64, %edx movq 8(%rsp), %rsi movq 32(%rsp), %rdi call cudaMemcpy@PLT movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $4, 60(%rsp) movl $4, 64(%rsp) movl $1, 68(%rsp) movl $0, %r9d movl $0, %r8d movq 60(%rsp), %rdx movl $1, %ecx movq 48(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L47 .L42: movl 68(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 60(%rsp), %rdx movq 48(%rsp), %rdi movl 56(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L48 .L43: call cudaDeviceSynchronize@PLT movl $2, %ecx movl $64, %edx movq 40(%rsp), %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq (%rsp), %rdi call _Z8printMatPi leaq .LC6(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 8(%rsp), %rdi call _Z8printMatPi leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 16(%rsp), %rdi call 
_Z8printMatPi movq 24(%rsp), %rdi call cudaFree@PLT movq 32(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rdi call cudaFree@PLT movq (%rsp), %rdi call cudaFreeHost@PLT movq 8(%rsp), %rdi call cudaFreeHost@PLT movq 16(%rsp), %rdi call cudaFreeHost@PLT movq 72(%rsp), %rax subq %fs:40, %rax jne .L49 movl $0, %eax addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L46: .cfi_restore_state movq 40(%rsp), %rdx movq 32(%rsp), %rsi movq 24(%rsp), %rdi call _Z31__device_stub__Z8dirtyMemPiS_S_PiS_S_ jmp .L41 .L47: call _Z29__device_stub__Z10printIndexvv jmp .L42 .L48: movq 40(%rsp), %rdx movq 32(%rsp), %rsi movq 24(%rsp), %rdi call _Z30__device_stub__Z7matMultPiS_S_PiS_S_ jmp .L43 .L49: call __stack_chk_fail@PLT .cfi_endproc .LFE2059: .size main, .-main .section .rodata.str1.1 .LC9: .string "_Z8dirtyMemPiS_S_" .LC10: .string "_Z10printIndexv" .LC11: .string "_Z7matMultPiS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2091: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC9(%rip), %rdx movq %rdx, %rcx leaq _Z8dirtyMemPiS_S_(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC10(%rip), %rdx movq %rdx, %rcx leaq _Z10printIndexv(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC11(%rip), 
%rdx movq %rdx, %rcx leaq _Z7matMultPiS_S_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2091: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
// Multiplicação de matrizes em CUDA // Disciplina: OPRP001 - Programação Paralela // Prof.: Mauricio Pillon // Aluno: Renato Tanaka #include <cuda.h> #include <stdio.h> #include <math.h> // Matriz Quadrada (nro_linhas = nro_colunas) #define N 4 // Número de linhas // Número de colunas // GPU: Multiplicação das matrizes (a) e (b), resultado em (c) __global__ void matMult (int *da, int *db, int *dc) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; dc[i*N+j] = 0; for(int k=0; k<N; k++) dc[i*N+j] += da[i*N+k] * db[k*N+j]; } // GPU: Imprime índices na matriz __global__ void printIndex (void) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; printf ("[%d][%d]=%d\t(x)\t%d\t%d\t%d\t(y)\t%d\t%d\t%d\n",i,j,(i*N+j), threadIdx.x, blockIdx.x, blockDim.x,threadIdx.y, blockIdx.y, blockDim.y); } // GPU: Inicializa os vetores (a), (b) e (c) na Memória Global __global__ void dirtyMem (int *da, int *db, int *dc) { int i = blockIdx.x * blockDim.x + threadIdx.x; da[i] = -1; db[i] = -2; dc[i] = -3; } // CPU: Inicializa os vetores (a) e (b) __host__ void initvet(int *host_a, int *host_b) { for (int i=0; i < N; i++) { for (int j=0; j < N; j++) { host_b[i*N+j] = (i+j)+((N-1)*i); host_a[i*N+j] = (N*N)-host_b[i*N+j]; } } } // CPU: Imprime matriz __host__ void printMat (int *mat){ for (int j =0; j < N; j++) printf("\t(%d)", j); printf("\n"); for (int i=0; i < N; i++) { printf("(%d)", i); for (int j=0; j < N; j++){ printf("\t%d", mat[i*N+j]); } printf("\n"); } } // CPU: função principal int main(int argc, char const *argv[]) { int *a, *b, *c; int *dev_a, *dev_b, *dev_c; int size; // Alocação de matriz quadrada size = N * N * sizeof(int); // Alocação de memória no host cudaMallocHost((void **) &a, size); cudaMallocHost((void **) &b, size); cudaMallocHost((void **) &c, size); // Alocação de memória na GPU para os vetores (a,b e c) cudaMalloc ((void **) &dev_a, size); cudaMalloc ((void **) &dev_b, size); 
cudaMalloc ((void **) &dev_c, size); // Atribui valores iniciais aos vetores em GPU dirtyMem<<<N, N>>>(dev_a, dev_b, dev_c); // Cópia GPU para CPU cudaMemcpy (a, dev_a, size, cudaMemcpyDeviceToHost); cudaMemcpy (b, dev_b, size, cudaMemcpyDeviceToHost); cudaMemcpy (c, dev_c, size, cudaMemcpyDeviceToHost); // Impressão na tela dos valores dos vetores printf ("\t ### Valores Inicializados na GPU ###\n"); printf ("\t ### Matriz (a) ### \n"); printMat(a); printf ("\t ### Matriz (b) ### \n"); printMat(b); printf ("\t ### Matriz (c) ### \n"); printMat(c); // Inicialização dos vetores (a) e (b) no host initvet(a,b); // Cópia dos vetores gerados em CPU p/ memória da GPU cudaMemcpy (dev_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy (dev_b, b, size, cudaMemcpyHostToDevice); // Número de blocos e threads p/ dimensões (x,y) dim3 dimBlock (1, 1); dim3 dimThreads(N, N); // Imprime as posições acessadas pelo dimBlock e dimThreads printIndex<<< dimBlock, dimThreads>>>(); // Execução do kernel matMult em GPU matMult<<< dimBlock, dimThreads>>>(dev_a, dev_b, dev_c); cudaDeviceSynchronize(); // Cópia do vetor (c) da GPU (Memória Global) para CPU cudaMemcpy (c, dev_c, size, cudaMemcpyDeviceToHost); // Impressão na tela dos valores dos vetores printf ("\t ### Valores após processamento em GPU ###\n"); printf ("\t ### Matriz (a) ### \n"); printMat(a); printf ("\t ### Matriz (b) ### \n"); printMat(b); printf ("\t ### Matriz (c) ### \n"); printMat(c); // Libera a Memória Global (GPU) cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); // Libera a Memória Global (CPU) cudaFreeHost(a); cudaFreeHost(b); cudaFreeHost(c); return 0; }
// Multiplicação de matrizes em CUDA // Disciplina: OPRP001 - Programação Paralela // Prof.: Mauricio Pillon // Aluno: Renato Tanaka #include <hip/hip_runtime.h> #include <stdio.h> #include <math.h> // Matriz Quadrada (nro_linhas = nro_colunas) #define N 4 // Número de linhas // Número de colunas // GPU: Multiplicação das matrizes (a) e (b), resultado em (c) __global__ void matMult (int *da, int *db, int *dc) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; dc[i*N+j] = 0; for(int k=0; k<N; k++) dc[i*N+j] += da[i*N+k] * db[k*N+j]; } // GPU: Imprime índices na matriz __global__ void printIndex (void) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; printf ("[%d][%d]=%d\t(x)\t%d\t%d\t%d\t(y)\t%d\t%d\t%d\n",i,j,(i*N+j), threadIdx.x, blockIdx.x, blockDim.x,threadIdx.y, blockIdx.y, blockDim.y); } // GPU: Inicializa os vetores (a), (b) e (c) na Memória Global __global__ void dirtyMem (int *da, int *db, int *dc) { int i = blockIdx.x * blockDim.x + threadIdx.x; da[i] = -1; db[i] = -2; dc[i] = -3; } // CPU: Inicializa os vetores (a) e (b) __host__ void initvet(int *host_a, int *host_b) { for (int i=0; i < N; i++) { for (int j=0; j < N; j++) { host_b[i*N+j] = (i+j)+((N-1)*i); host_a[i*N+j] = (N*N)-host_b[i*N+j]; } } } // CPU: Imprime matriz __host__ void printMat (int *mat){ for (int j =0; j < N; j++) printf("\t(%d)", j); printf("\n"); for (int i=0; i < N; i++) { printf("(%d)", i); for (int j=0; j < N; j++){ printf("\t%d", mat[i*N+j]); } printf("\n"); } } // CPU: função principal int main(int argc, char const *argv[]) { int *a, *b, *c; int *dev_a, *dev_b, *dev_c; int size; // Alocação de matriz quadrada size = N * N * sizeof(int); // Alocação de memória no host hipHostMalloc((void **) &a, size, hipHostMallocDefault); hipHostMalloc((void **) &b, size, hipHostMallocDefault); hipHostMalloc((void **) &c, size, hipHostMallocDefault); // Alocação de memória na GPU para os vetores (a,b e c) 
hipMalloc ((void **) &dev_a, size); hipMalloc ((void **) &dev_b, size); hipMalloc ((void **) &dev_c, size); // Atribui valores iniciais aos vetores em GPU dirtyMem<<<N, N>>>(dev_a, dev_b, dev_c); // Cópia GPU para CPU hipMemcpy (a, dev_a, size, hipMemcpyDeviceToHost); hipMemcpy (b, dev_b, size, hipMemcpyDeviceToHost); hipMemcpy (c, dev_c, size, hipMemcpyDeviceToHost); // Impressão na tela dos valores dos vetores printf ("\t ### Valores Inicializados na GPU ###\n"); printf ("\t ### Matriz (a) ### \n"); printMat(a); printf ("\t ### Matriz (b) ### \n"); printMat(b); printf ("\t ### Matriz (c) ### \n"); printMat(c); // Inicialização dos vetores (a) e (b) no host initvet(a,b); // Cópia dos vetores gerados em CPU p/ memória da GPU hipMemcpy (dev_a, a, size, hipMemcpyHostToDevice); hipMemcpy (dev_b, b, size, hipMemcpyHostToDevice); // Número de blocos e threads p/ dimensões (x,y) dim3 dimBlock (1, 1); dim3 dimThreads(N, N); // Imprime as posições acessadas pelo dimBlock e dimThreads printIndex<<< dimBlock, dimThreads>>>(); // Execução do kernel matMult em GPU matMult<<< dimBlock, dimThreads>>>(dev_a, dev_b, dev_c); hipDeviceSynchronize(); // Cópia do vetor (c) da GPU (Memória Global) para CPU hipMemcpy (c, dev_c, size, hipMemcpyDeviceToHost); // Impressão na tela dos valores dos vetores printf ("\t ### Valores após processamento em GPU ###\n"); printf ("\t ### Matriz (a) ### \n"); printMat(a); printf ("\t ### Matriz (b) ### \n"); printMat(b); printf ("\t ### Matriz (c) ### \n"); printMat(c); // Libera a Memória Global (GPU) hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); // Libera a Memória Global (CPU) hipHostFree(a); hipHostFree(b); hipHostFree(c); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
// Multiplicação de matrizes em CUDA // Disciplina: OPRP001 - Programação Paralela // Prof.: Mauricio Pillon // Aluno: Renato Tanaka #include <hip/hip_runtime.h> #include <stdio.h> #include <math.h> // Matriz Quadrada (nro_linhas = nro_colunas) #define N 4 // Número de linhas // Número de colunas // GPU: Multiplicação das matrizes (a) e (b), resultado em (c) __global__ void matMult (int *da, int *db, int *dc) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; dc[i*N+j] = 0; for(int k=0; k<N; k++) dc[i*N+j] += da[i*N+k] * db[k*N+j]; } // GPU: Imprime índices na matriz __global__ void printIndex (void) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; printf ("[%d][%d]=%d\t(x)\t%d\t%d\t%d\t(y)\t%d\t%d\t%d\n",i,j,(i*N+j), threadIdx.x, blockIdx.x, blockDim.x,threadIdx.y, blockIdx.y, blockDim.y); } // GPU: Inicializa os vetores (a), (b) e (c) na Memória Global __global__ void dirtyMem (int *da, int *db, int *dc) { int i = blockIdx.x * blockDim.x + threadIdx.x; da[i] = -1; db[i] = -2; dc[i] = -3; } // CPU: Inicializa os vetores (a) e (b) __host__ void initvet(int *host_a, int *host_b) { for (int i=0; i < N; i++) { for (int j=0; j < N; j++) { host_b[i*N+j] = (i+j)+((N-1)*i); host_a[i*N+j] = (N*N)-host_b[i*N+j]; } } } // CPU: Imprime matriz __host__ void printMat (int *mat){ for (int j =0; j < N; j++) printf("\t(%d)", j); printf("\n"); for (int i=0; i < N; i++) { printf("(%d)", i); for (int j=0; j < N; j++){ printf("\t%d", mat[i*N+j]); } printf("\n"); } } // CPU: função principal int main(int argc, char const *argv[]) { int *a, *b, *c; int *dev_a, *dev_b, *dev_c; int size; // Alocação de matriz quadrada size = N * N * sizeof(int); // Alocação de memória no host hipHostMalloc((void **) &a, size, hipHostMallocDefault); hipHostMalloc((void **) &b, size, hipHostMallocDefault); hipHostMalloc((void **) &c, size, hipHostMallocDefault); // Alocação de memória na GPU para os vetores (a,b e c) 
hipMalloc ((void **) &dev_a, size); hipMalloc ((void **) &dev_b, size); hipMalloc ((void **) &dev_c, size); // Atribui valores iniciais aos vetores em GPU dirtyMem<<<N, N>>>(dev_a, dev_b, dev_c); // Cópia GPU para CPU hipMemcpy (a, dev_a, size, hipMemcpyDeviceToHost); hipMemcpy (b, dev_b, size, hipMemcpyDeviceToHost); hipMemcpy (c, dev_c, size, hipMemcpyDeviceToHost); // Impressão na tela dos valores dos vetores printf ("\t ### Valores Inicializados na GPU ###\n"); printf ("\t ### Matriz (a) ### \n"); printMat(a); printf ("\t ### Matriz (b) ### \n"); printMat(b); printf ("\t ### Matriz (c) ### \n"); printMat(c); // Inicialização dos vetores (a) e (b) no host initvet(a,b); // Cópia dos vetores gerados em CPU p/ memória da GPU hipMemcpy (dev_a, a, size, hipMemcpyHostToDevice); hipMemcpy (dev_b, b, size, hipMemcpyHostToDevice); // Número de blocos e threads p/ dimensões (x,y) dim3 dimBlock (1, 1); dim3 dimThreads(N, N); // Imprime as posições acessadas pelo dimBlock e dimThreads printIndex<<< dimBlock, dimThreads>>>(); // Execução do kernel matMult em GPU matMult<<< dimBlock, dimThreads>>>(dev_a, dev_b, dev_c); hipDeviceSynchronize(); // Cópia do vetor (c) da GPU (Memória Global) para CPU hipMemcpy (c, dev_c, size, hipMemcpyDeviceToHost); // Impressão na tela dos valores dos vetores printf ("\t ### Valores após processamento em GPU ###\n"); printf ("\t ### Matriz (a) ### \n"); printMat(a); printf ("\t ### Matriz (b) ### \n"); printMat(b); printf ("\t ### Matriz (c) ### \n"); printMat(c); // Libera a Memória Global (GPU) hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); // Libera a Memória Global (CPU) hipHostFree(a); hipHostFree(b); hipHostFree(c); return 0; }
.text .file "cuda.hip" .globl _Z22__device_stub__matMultPiS_S_ # -- Begin function _Z22__device_stub__matMultPiS_S_ .p2align 4, 0x90 .type _Z22__device_stub__matMultPiS_S_,@function _Z22__device_stub__matMultPiS_S_: # @_Z22__device_stub__matMultPiS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z7matMultPiS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z22__device_stub__matMultPiS_S_, .Lfunc_end0-_Z22__device_stub__matMultPiS_S_ .cfi_endproc # -- End function .globl _Z25__device_stub__printIndexv # -- Begin function _Z25__device_stub__printIndexv .p2align 4, 0x90 .type _Z25__device_stub__printIndexv,@function _Z25__device_stub__printIndexv: # @_Z25__device_stub__printIndexv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z10printIndexv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end1: .size _Z25__device_stub__printIndexv, .Lfunc_end1-_Z25__device_stub__printIndexv .cfi_endproc # -- End function .globl _Z23__device_stub__dirtyMemPiS_S_ # -- Begin function _Z23__device_stub__dirtyMemPiS_S_ .p2align 4, 0x90 .type _Z23__device_stub__dirtyMemPiS_S_,@function _Z23__device_stub__dirtyMemPiS_S_: # 
@_Z23__device_stub__dirtyMemPiS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8dirtyMemPiS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end2: .size _Z23__device_stub__dirtyMemPiS_S_, .Lfunc_end2-_Z23__device_stub__dirtyMemPiS_S_ .cfi_endproc # -- End function .globl _Z7initvetPiS_ # -- Begin function _Z7initvetPiS_ .p2align 4, 0x90 .type _Z7initvetPiS_,@function _Z7initvetPiS_: # @_Z7initvetPiS_ .cfi_startproc # %bb.0: movl $16, %eax xorl %ecx, %ecx xorl %edx, %edx .p2align 4, 0x90 .LBB3_1: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB3_2 Depth 2 movl %eax, %r8d xorl %r9d, %r9d .p2align 4, 0x90 .LBB3_2: # Parent Loop BB3_1 Depth=1 # => This Inner Loop Header: Depth=2 leal (%rcx,%r9), %r10d movl %r10d, (%rsi,%r9,4) movl %r8d, (%rdi,%r9,4) incq %r9 decl %r8d cmpq $4, %r9 jne .LBB3_2 # %bb.3: # in Loop: Header=BB3_1 Depth=1 incq %rdx addl $-4, %eax addq $4, %rcx addq $16, %rdi addq $16, %rsi cmpq $4, %rdx jne .LBB3_1 # %bb.4: retq .Lfunc_end3: .size _Z7initvetPiS_, .Lfunc_end3-_Z7initvetPiS_ .cfi_endproc # -- End function .globl _Z8printMatPi # -- Begin function _Z8printMatPi .p2align 4, 0x90 .type _Z8printMatPi,@function _Z8printMatPi: # @_Z8printMatPi .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 pushq %rax .cfi_def_cfa_offset 48 .cfi_offset %rbx, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset 
%rbp, -16 movq %rdi, %rbx xorl %ebp, %ebp .p2align 4, 0x90 .LBB4_1: # =>This Inner Loop Header: Depth=1 movl $.L.str, %edi movl %ebp, %esi xorl %eax, %eax callq printf incl %ebp cmpl $4, %ebp jne .LBB4_1 # %bb.2: movl $10, %edi callq putchar@PLT xorl %r14d, %r14d .p2align 4, 0x90 .LBB4_3: # =>This Loop Header: Depth=1 # Child Loop BB4_4 Depth 2 movl $.L.str.2, %edi movl %r14d, %esi xorl %eax, %eax callq printf xorl %r15d, %r15d .p2align 4, 0x90 .LBB4_4: # Parent Loop BB4_3 Depth=1 # => This Inner Loop Header: Depth=2 movl (%rbx,%r15,4), %esi movl $.L.str.3, %edi xorl %eax, %eax callq printf incq %r15 cmpq $4, %r15 jne .LBB4_4 # %bb.5: # in Loop: Header=BB4_3 Depth=1 movl $10, %edi callq putchar@PLT incq %r14 addq $16, %rbx cmpq $4, %r14 jne .LBB4_3 # %bb.6: addq $8, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size _Z8printMatPi, .Lfunc_end4-_Z8printMatPi .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 subq $160, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movabsq $4294967297, %rbx # imm = 0x100000001 leaq 48(%rsp), %rdi movl $64, %esi xorl %edx, %edx callq hipHostMalloc leaq 8(%rsp), %rdi movl $64, %esi xorl %edx, %edx callq hipHostMalloc leaq 40(%rsp), %rdi movl $64, %esi xorl %edx, %edx callq hipHostMalloc leaq 32(%rsp), %rdi movl $64, %esi callq hipMalloc leaq 24(%rsp), %rdi movl $64, %esi callq hipMalloc leaq 16(%rsp), %rdi movl $64, %esi callq hipMalloc leaq 3(%rbx), %rdi movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq 
__hipPushCallConfiguration testl %eax, %eax jne .LBB5_2 # %bb.1: movq 32(%rsp), %rax movq 24(%rsp), %rcx movq 16(%rsp), %rdx movq %rax, 72(%rsp) movq %rcx, 96(%rsp) movq %rdx, 120(%rsp) leaq 72(%rsp), %rax movq %rax, 128(%rsp) leaq 96(%rsp), %rax movq %rax, 136(%rsp) leaq 120(%rsp), %rax movq %rax, 144(%rsp) leaq 56(%rsp), %rdi leaq 80(%rsp), %rsi leaq 112(%rsp), %rdx leaq 104(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 80(%rsp), %rcx movl 88(%rsp), %r8d leaq 128(%rsp), %r9 movl $_Z8dirtyMemPiS_S_, %edi pushq 104(%rsp) .cfi_adjust_cfa_offset 8 pushq 120(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB5_2: movq 48(%rsp), %rdi movq 32(%rsp), %rsi movl $64, %edx movl $2, %ecx callq hipMemcpy movq 8(%rsp), %rdi movq 24(%rsp), %rsi movl $64, %edx movl $2, %ecx callq hipMemcpy movq 40(%rsp), %rdi movq 16(%rsp), %rsi movl $64, %edx movl $2, %ecx callq hipMemcpy movl $.Lstr, %edi callq puts@PLT movl $.Lstr.5, %edi callq puts@PLT movq 48(%rsp), %r15 xorl %ebp, %ebp .p2align 4, 0x90 .LBB5_3: # =>This Inner Loop Header: Depth=1 movl $.L.str, %edi movl %ebp, %esi xorl %eax, %eax callq printf incl %ebp cmpl $4, %ebp jne .LBB5_3 # %bb.4: movl $10, %edi callq putchar@PLT xorl %r14d, %r14d .p2align 4, 0x90 .LBB5_5: # =>This Loop Header: Depth=1 # Child Loop BB5_6 Depth 2 movl $.L.str.2, %edi movl %r14d, %esi xorl %eax, %eax callq printf xorl %r12d, %r12d .p2align 4, 0x90 .LBB5_6: # Parent Loop BB5_5 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r15,%r12,4), %esi movl $.L.str.3, %edi xorl %eax, %eax callq printf incq %r12 cmpq $4, %r12 jne .LBB5_6 # %bb.7: # in Loop: Header=BB5_5 Depth=1 movl $10, %edi callq putchar@PLT incq %r14 addq $16, %r15 cmpq $4, %r14 jne .LBB5_5 # %bb.8: # %_Z8printMatPi.exit movl $.Lstr.6, %edi callq puts@PLT movq 8(%rsp), %r15 xorl %ebp, %ebp .p2align 4, 0x90 .LBB5_9: # =>This Inner Loop Header: Depth=1 movl $.L.str, %edi movl %ebp, %esi xorl %eax, %eax 
callq printf incl %ebp cmpl $4, %ebp jne .LBB5_9 # %bb.10: movl $10, %edi callq putchar@PLT xorl %r14d, %r14d .p2align 4, 0x90 .LBB5_11: # =>This Loop Header: Depth=1 # Child Loop BB5_12 Depth 2 movl $.L.str.2, %edi movl %r14d, %esi xorl %eax, %eax callq printf xorl %r12d, %r12d .p2align 4, 0x90 .LBB5_12: # Parent Loop BB5_11 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r15,%r12,4), %esi movl $.L.str.3, %edi xorl %eax, %eax callq printf incq %r12 cmpq $4, %r12 jne .LBB5_12 # %bb.13: # in Loop: Header=BB5_11 Depth=1 movl $10, %edi callq putchar@PLT incq %r14 addq $16, %r15 cmpq $4, %r14 jne .LBB5_11 # %bb.14: # %_Z8printMatPi.exit49 movl $.Lstr.7, %edi callq puts@PLT movq 40(%rsp), %r15 xorl %ebp, %ebp .p2align 4, 0x90 .LBB5_15: # =>This Inner Loop Header: Depth=1 movl $.L.str, %edi movl %ebp, %esi xorl %eax, %eax callq printf incl %ebp cmpl $4, %ebp jne .LBB5_15 # %bb.16: movl $10, %edi callq putchar@PLT xorl %r14d, %r14d .p2align 4, 0x90 .LBB5_17: # =>This Loop Header: Depth=1 # Child Loop BB5_18 Depth 2 movl $.L.str.2, %edi movl %r14d, %esi xorl %eax, %eax callq printf xorl %r12d, %r12d .p2align 4, 0x90 .LBB5_18: # Parent Loop BB5_17 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r15,%r12,4), %esi movl $.L.str.3, %edi xorl %eax, %eax callq printf incq %r12 cmpq $4, %r12 jne .LBB5_18 # %bb.19: # in Loop: Header=BB5_17 Depth=1 movl $10, %edi callq putchar@PLT incq %r14 addq $16, %r15 cmpq $4, %r14 jne .LBB5_17 # %bb.20: # %_Z8printMatPi.exit60 movq 48(%rsp), %rsi movl $16, %eax movq 8(%rsp), %rcx xorl %edx, %edx xorl %edi, %edi .p2align 4, 0x90 .LBB5_21: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB5_22 Depth 2 movq %rdx, %r8 xorl %r9d, %r9d .p2align 4, 0x90 .LBB5_22: # Parent Loop BB5_21 Depth=1 # => This Inner Loop Header: Depth=2 movl %r8d, (%rcx,%r8,4) leal (%rax,%r9), %r10d movl %r10d, (%rsi,%r8,4) decq %r9 incq %r8 cmpq $-4, %r9 jne .LBB5_22 # %bb.23: # in Loop: Header=BB5_21 Depth=1 incq %rdi addq $4, %rdx addq $-4, %rax cmpq $4, 
%rdi jne .LBB5_21 # %bb.24: # %_Z7initvetPiS_.exit movabsq $17179869188, %r14 # imm = 0x400000004 movq 32(%rsp), %rdi movl $64, %edx movl $1, %ecx callq hipMemcpy movq 24(%rsp), %rdi movq 8(%rsp), %rsi movl $64, %edx movl $1, %ecx callq hipMemcpy movq %rbx, %rdi movl $1, %esi movq %r14, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB5_26 # %bb.25: leaq 128(%rsp), %rdi leaq 56(%rsp), %rsi leaq 80(%rsp), %rdx leaq 72(%rsp), %rcx callq __hipPopCallConfiguration movq 128(%rsp), %rsi movl 136(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z10printIndexv, %edi pushq 72(%rsp) .cfi_adjust_cfa_offset 8 pushq 88(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB5_26: movq %rbx, %rdi movl $1, %esi movq %r14, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB5_28 # %bb.27: movq 32(%rsp), %rax movq 24(%rsp), %rcx movq 16(%rsp), %rdx movq %rax, 72(%rsp) movq %rcx, 96(%rsp) movq %rdx, 120(%rsp) leaq 72(%rsp), %rax movq %rax, 128(%rsp) leaq 96(%rsp), %rax movq %rax, 136(%rsp) leaq 120(%rsp), %rax movq %rax, 144(%rsp) leaq 56(%rsp), %rdi leaq 80(%rsp), %rsi leaq 112(%rsp), %rdx leaq 104(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 80(%rsp), %rcx movl 88(%rsp), %r8d leaq 128(%rsp), %r9 movl $_Z7matMultPiS_S_, %edi pushq 104(%rsp) .cfi_adjust_cfa_offset 8 pushq 120(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB5_28: callq hipDeviceSynchronize movq 40(%rsp), %rdi movq 16(%rsp), %rsi movl $64, %edx movl $2, %ecx callq hipMemcpy movl $.Lstr.4, %edi callq puts@PLT movl $.Lstr.5, %edi callq puts@PLT movq 48(%rsp), %r14 xorl %ebx, %ebx .p2align 4, 0x90 .LBB5_29: # =>This Inner Loop Header: Depth=1 movl $.L.str, %edi movl %ebx, %esi xorl %eax, %eax callq printf incl %ebx cmpl $4, %ebx jne .LBB5_29 
# %bb.30: movl $10, %edi callq putchar@PLT xorl %ebx, %ebx .p2align 4, 0x90 .LBB5_31: # =>This Loop Header: Depth=1 # Child Loop BB5_32 Depth 2 movl $.L.str.2, %edi movl %ebx, %esi xorl %eax, %eax callq printf xorl %r15d, %r15d .p2align 4, 0x90 .LBB5_32: # Parent Loop BB5_31 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r14,%r15,4), %esi movl $.L.str.3, %edi xorl %eax, %eax callq printf incq %r15 cmpq $4, %r15 jne .LBB5_32 # %bb.33: # in Loop: Header=BB5_31 Depth=1 movl $10, %edi callq putchar@PLT incq %rbx addq $16, %r14 cmpq $4, %rbx jne .LBB5_31 # %bb.34: # %_Z8printMatPi.exit86 movl $.Lstr.6, %edi callq puts@PLT movq 8(%rsp), %r14 xorl %ebx, %ebx .p2align 4, 0x90 .LBB5_35: # =>This Inner Loop Header: Depth=1 movl $.L.str, %edi movl %ebx, %esi xorl %eax, %eax callq printf incl %ebx cmpl $4, %ebx jne .LBB5_35 # %bb.36: movl $10, %edi callq putchar@PLT xorl %ebx, %ebx .p2align 4, 0x90 .LBB5_37: # =>This Loop Header: Depth=1 # Child Loop BB5_38 Depth 2 movl $.L.str.2, %edi movl %ebx, %esi xorl %eax, %eax callq printf xorl %r15d, %r15d .p2align 4, 0x90 .LBB5_38: # Parent Loop BB5_37 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r14,%r15,4), %esi movl $.L.str.3, %edi xorl %eax, %eax callq printf incq %r15 cmpq $4, %r15 jne .LBB5_38 # %bb.39: # in Loop: Header=BB5_37 Depth=1 movl $10, %edi callq putchar@PLT incq %rbx addq $16, %r14 cmpq $4, %rbx jne .LBB5_37 # %bb.40: # %_Z8printMatPi.exit97 movl $.Lstr.7, %edi callq puts@PLT movq 40(%rsp), %r14 xorl %ebx, %ebx .p2align 4, 0x90 .LBB5_41: # =>This Inner Loop Header: Depth=1 movl $.L.str, %edi movl %ebx, %esi xorl %eax, %eax callq printf incl %ebx cmpl $4, %ebx jne .LBB5_41 # %bb.42: movl $10, %edi callq putchar@PLT xorl %ebx, %ebx .p2align 4, 0x90 .LBB5_43: # =>This Loop Header: Depth=1 # Child Loop BB5_44 Depth 2 movl $.L.str.2, %edi movl %ebx, %esi xorl %eax, %eax callq printf xorl %r15d, %r15d .p2align 4, 0x90 .LBB5_44: # Parent Loop BB5_43 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r14,%r15,4), 
%esi movl $.L.str.3, %edi xorl %eax, %eax callq printf incq %r15 cmpq $4, %r15 jne .LBB5_44 # %bb.45: # in Loop: Header=BB5_43 Depth=1 movl $10, %edi callq putchar@PLT incq %rbx addq $16, %r14 cmpq $4, %rbx jne .LBB5_43 # %bb.46: # %_Z8printMatPi.exit108 movq 32(%rsp), %rdi callq hipFree movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq 48(%rsp), %rdi callq hipHostFree movq 8(%rsp), %rdi callq hipHostFree movq 40(%rsp), %rdi callq hipHostFree xorl %eax, %eax addq $160, %rsp .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size main, .Lfunc_end5-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB6_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB6_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z7matMultPiS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10printIndexv, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8dirtyMemPiS_S_, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL 
.Lfunc_end6: .size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB7_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB7_2: retq .Lfunc_end7: .size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor .cfi_endproc # -- End function .type _Z7matMultPiS_S_,@object # @_Z7matMultPiS_S_ .section .rodata,"a",@progbits .globl _Z7matMultPiS_S_ .p2align 3, 0x0 _Z7matMultPiS_S_: .quad _Z22__device_stub__matMultPiS_S_ .size _Z7matMultPiS_S_, 8 .type _Z10printIndexv,@object # @_Z10printIndexv .globl _Z10printIndexv .p2align 3, 0x0 _Z10printIndexv: .quad _Z25__device_stub__printIndexv .size _Z10printIndexv, 8 .type _Z8dirtyMemPiS_S_,@object # @_Z8dirtyMemPiS_S_ .globl _Z8dirtyMemPiS_S_ .p2align 3, 0x0 _Z8dirtyMemPiS_S_: .quad _Z23__device_stub__dirtyMemPiS_S_ .size _Z8dirtyMemPiS_S_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "\t(%d)" .size .L.str, 6 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "(%d)" .size .L.str.2, 5 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "\t%d" .size .L.str.3, 4 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z7matMultPiS_S_" .size .L__unnamed_1, 17 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z10printIndexv" .size .L__unnamed_2, 16 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z8dirtyMemPiS_S_" .size .L__unnamed_3, 18 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local 
__hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "\t ### Valores Inicializados na GPU ###" .size .Lstr, 39 .type .Lstr.4,@object # @str.4 .Lstr.4: .asciz "\t ### Valores ap\303\263s processamento em GPU ###" .size .Lstr.4, 45 .type .Lstr.5,@object # @str.5 .Lstr.5: .asciz "\t ### Matriz (a) ### " .size .Lstr.5, 22 .type .Lstr.6,@object # @str.6 .Lstr.6: .asciz "\t ### Matriz (b) ### " .size .Lstr.6, 22 .type .Lstr.7,@object # @str.7 .Lstr.7: .asciz "\t ### Matriz (c) ### " .size .Lstr.7, 22 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z22__device_stub__matMultPiS_S_ .addrsig_sym _Z25__device_stub__printIndexv .addrsig_sym _Z23__device_stub__dirtyMemPiS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z7matMultPiS_S_ .addrsig_sym _Z10printIndexv .addrsig_sym _Z8dirtyMemPiS_S_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_001a3795_00000000-6_cuda.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2062: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2062: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z7initvetPiS_ .type _Z7initvetPiS_, @function _Z7initvetPiS_: .LFB2057: .cfi_startproc endbr64 movl $4, %ecx movl $0, %r10d movl $16, %r9d .L4: leal -4(%rcx), %eax movq %r10, %rdx salq $4, %rdx .L5: movl %eax, (%rsi,%rdx) movl %r9d, %r8d subl %eax, %r8d movl %r8d, (%rdi,%rdx) addl $1, %eax addq $4, %rdx cmpl %ecx, %eax jne .L5 addq $1, %r10 addl $4, %ecx cmpl $20, %ecx jne .L4 ret .cfi_endproc .LFE2057: .size _Z7initvetPiS_, .-_Z7initvetPiS_ .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "\t(%d)" .LC1: .string "\n" .LC2: .string "(%d)" .LC3: .string "\t%d" .text .globl _Z8printMatPi .type _Z8printMatPi, @function _Z8printMatPi: .LFB2058: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 subq $8, %rsp .cfi_def_cfa_offset 64 movq %rdi, %rbp movl $0, %ebx leaq .LC0(%rip), %r12 .L9: movl %ebx, %edx movq %r12, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $1, %ebx cmpl $4, %ebx jne .L9 leaq .LC1(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addq $16, %rbp movl $0, %r13d leaq .LC2(%rip), %r15 leaq .LC3(%rip), %r12 leaq .LC1(%rip), %r14 .L11: movl %r13d, %edx movq %r15, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq -16(%rbp), %rbx .L10: movl (%rbx), %edx movq %r12, %rsi movl $2, %edi movl 
$0, %eax call __printf_chk@PLT addq $4, %rbx cmpq %rbp, %rbx jne .L10 movq %r14, %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT addl $1, %r13d addq $16, %rbp cmpl $4, %r13d jne .L11 addq $8, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2058: .size _Z8printMatPi, .-_Z8printMatPi .globl _Z30__device_stub__Z7matMultPiS_S_PiS_S_ .type _Z30__device_stub__Z7matMultPiS_S_PiS_S_, @function _Z30__device_stub__Z7matMultPiS_S_PiS_S_: .LFB2084: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L20 .L16: movq 120(%rsp), %rax subq %fs:40, %rax jne .L21 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L20: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z7matMultPiS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L16 .L21: call __stack_chk_fail@PLT .cfi_endproc .LFE2084: .size _Z30__device_stub__Z7matMultPiS_S_PiS_S_, .-_Z30__device_stub__Z7matMultPiS_S_PiS_S_ .globl _Z7matMultPiS_S_ .type _Z7matMultPiS_S_, @function _Z7matMultPiS_S_: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z30__device_stub__Z7matMultPiS_S_PiS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret 
.cfi_endproc .LFE2085: .size _Z7matMultPiS_S_, .-_Z7matMultPiS_S_ .globl _Z29__device_stub__Z10printIndexvv .type _Z29__device_stub__Z10printIndexvv, @function _Z29__device_stub__Z10printIndexvv: .LFB2086: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L28 .L24: movq 72(%rsp), %rax subq %fs:40, %rax jne .L29 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L28: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z10printIndexv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L24 .L29: call __stack_chk_fail@PLT .cfi_endproc .LFE2086: .size _Z29__device_stub__Z10printIndexvv, .-_Z29__device_stub__Z10printIndexvv .globl _Z10printIndexv .type _Z10printIndexv, @function _Z10printIndexv: .LFB2087: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z29__device_stub__Z10printIndexvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2087: .size _Z10printIndexv, .-_Z10printIndexv .globl _Z31__device_stub__Z8dirtyMemPiS_S_PiS_S_ .type _Z31__device_stub__Z8dirtyMemPiS_S_PiS_S_, @function _Z31__device_stub__Z8dirtyMemPiS_S_PiS_S_: .LFB2088: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movq %rdx, 8(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 8(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 
32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L36 .L32: movq 120(%rsp), %rax subq %fs:40, %rax jne .L37 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L36: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z8dirtyMemPiS_S_(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L32 .L37: call __stack_chk_fail@PLT .cfi_endproc .LFE2088: .size _Z31__device_stub__Z8dirtyMemPiS_S_PiS_S_, .-_Z31__device_stub__Z8dirtyMemPiS_S_PiS_S_ .globl _Z8dirtyMemPiS_S_ .type _Z8dirtyMemPiS_S_, @function _Z8dirtyMemPiS_S_: .LFB2089: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z8dirtyMemPiS_S_PiS_S_ addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2089: .size _Z8dirtyMemPiS_S_, .-_Z8dirtyMemPiS_S_ .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC4: .string "\t ### Valores Inicializados na GPU ###\n" .section .rodata.str1.1 .LC5: .string "\t ### Matriz (a) ### \n" .LC6: .string "\t ### Matriz (b) ### \n" .LC7: .string "\t ### Matriz (c) ### \n" .section .rodata.str1.8 .align 8 .LC8: .string "\t ### Valores ap\303\263s processamento em GPU ###\n" .text .globl main .type main, @function main: .LFB2059: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movq %rsp, %rdi movl $64, %esi call cudaMallocHost@PLT leaq 8(%rsp), %rdi movl $64, %esi call cudaMallocHost@PLT leaq 16(%rsp), %rdi movl $64, %esi call cudaMallocHost@PLT leaq 24(%rsp), %rdi movl $64, %esi call cudaMalloc@PLT leaq 32(%rsp), %rdi movl $64, %esi call cudaMalloc@PLT leaq 40(%rsp), %rdi movl $64, %esi call cudaMalloc@PLT movl $4, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) movl $4, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $0, %r9d movl $0, %r8d 
movq 60(%rsp), %rdx movl $1, %ecx movq 48(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L46 .L41: movl $2, %ecx movl $64, %edx movq 24(%rsp), %rsi movq (%rsp), %rdi call cudaMemcpy@PLT movl $2, %ecx movl $64, %edx movq 32(%rsp), %rsi movq 8(%rsp), %rdi call cudaMemcpy@PLT movl $2, %ecx movl $64, %edx movq 40(%rsp), %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT leaq .LC4(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq (%rsp), %rdi call _Z8printMatPi leaq .LC6(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 8(%rsp), %rdi call _Z8printMatPi leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 16(%rsp), %rdi call _Z8printMatPi movq 8(%rsp), %rsi movq (%rsp), %rdi call _Z7initvetPiS_ movl $1, %ecx movl $64, %edx movq (%rsp), %rsi movq 24(%rsp), %rdi call cudaMemcpy@PLT movl $1, %ecx movl $64, %edx movq 8(%rsp), %rsi movq 32(%rsp), %rdi call cudaMemcpy@PLT movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $4, 60(%rsp) movl $4, 64(%rsp) movl $1, 68(%rsp) movl $0, %r9d movl $0, %r8d movq 60(%rsp), %rdx movl $1, %ecx movq 48(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L47 .L42: movl 68(%rsp), %ecx movl $0, %r9d movl $0, %r8d movq 60(%rsp), %rdx movq 48(%rsp), %rdi movl 56(%rsp), %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L48 .L43: call cudaDeviceSynchronize@PLT movl $2, %ecx movl $64, %edx movq 40(%rsp), %rsi movq 16(%rsp), %rdi call cudaMemcpy@PLT leaq .LC8(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT leaq .LC5(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq (%rsp), %rdi call _Z8printMatPi leaq .LC6(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 8(%rsp), %rdi call _Z8printMatPi leaq .LC7(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movq 16(%rsp), %rdi call 
_Z8printMatPi movq 24(%rsp), %rdi call cudaFree@PLT movq 32(%rsp), %rdi call cudaFree@PLT movq 40(%rsp), %rdi call cudaFree@PLT movq (%rsp), %rdi call cudaFreeHost@PLT movq 8(%rsp), %rdi call cudaFreeHost@PLT movq 16(%rsp), %rdi call cudaFreeHost@PLT movq 72(%rsp), %rax subq %fs:40, %rax jne .L49 movl $0, %eax addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L46: .cfi_restore_state movq 40(%rsp), %rdx movq 32(%rsp), %rsi movq 24(%rsp), %rdi call _Z31__device_stub__Z8dirtyMemPiS_S_PiS_S_ jmp .L41 .L47: call _Z29__device_stub__Z10printIndexvv jmp .L42 .L48: movq 40(%rsp), %rdx movq 32(%rsp), %rsi movq 24(%rsp), %rdi call _Z30__device_stub__Z7matMultPiS_S_PiS_S_ jmp .L43 .L49: call __stack_chk_fail@PLT .cfi_endproc .LFE2059: .size main, .-main .section .rodata.str1.1 .LC9: .string "_Z8dirtyMemPiS_S_" .LC10: .string "_Z10printIndexv" .LC11: .string "_Z7matMultPiS_S_" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2091: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC9(%rip), %rdx movq %rdx, %rcx leaq _Z8dirtyMemPiS_S_(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC10(%rip), %rdx movq %rdx, %rcx leaq _Z10printIndexv(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC11(%rip), 
%rdx movq %rdx, %rcx leaq _Z7matMultPiS_S_(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2091: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "cuda.hip" .globl _Z22__device_stub__matMultPiS_S_ # -- Begin function _Z22__device_stub__matMultPiS_S_ .p2align 4, 0x90 .type _Z22__device_stub__matMultPiS_S_,@function _Z22__device_stub__matMultPiS_S_: # @_Z22__device_stub__matMultPiS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z7matMultPiS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z22__device_stub__matMultPiS_S_, .Lfunc_end0-_Z22__device_stub__matMultPiS_S_ .cfi_endproc # -- End function .globl _Z25__device_stub__printIndexv # -- Begin function _Z25__device_stub__printIndexv .p2align 4, 0x90 .type _Z25__device_stub__printIndexv,@function _Z25__device_stub__printIndexv: # @_Z25__device_stub__printIndexv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z10printIndexv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end1: .size _Z25__device_stub__printIndexv, .Lfunc_end1-_Z25__device_stub__printIndexv .cfi_endproc # -- End function .globl _Z23__device_stub__dirtyMemPiS_S_ # -- Begin function _Z23__device_stub__dirtyMemPiS_S_ .p2align 4, 0x90 .type _Z23__device_stub__dirtyMemPiS_S_,@function _Z23__device_stub__dirtyMemPiS_S_: # 
@_Z23__device_stub__dirtyMemPiS_S_ .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movq %rdx, 56(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 56(%rsp), %rax movq %rax, 96(%rsp) leaq 40(%rsp), %rdi leaq 24(%rsp), %rsi leaq 16(%rsp), %rdx leaq 8(%rsp), %rcx callq __hipPopCallConfiguration movq 40(%rsp), %rsi movl 48(%rsp), %edx movq 24(%rsp), %rcx movl 32(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z8dirtyMemPiS_S_, %edi pushq 8(%rsp) .cfi_adjust_cfa_offset 8 pushq 24(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end2: .size _Z23__device_stub__dirtyMemPiS_S_, .Lfunc_end2-_Z23__device_stub__dirtyMemPiS_S_ .cfi_endproc # -- End function .globl _Z7initvetPiS_ # -- Begin function _Z7initvetPiS_ .p2align 4, 0x90 .type _Z7initvetPiS_,@function _Z7initvetPiS_: # @_Z7initvetPiS_ .cfi_startproc # %bb.0: movl $16, %eax xorl %ecx, %ecx xorl %edx, %edx .p2align 4, 0x90 .LBB3_1: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB3_2 Depth 2 movl %eax, %r8d xorl %r9d, %r9d .p2align 4, 0x90 .LBB3_2: # Parent Loop BB3_1 Depth=1 # => This Inner Loop Header: Depth=2 leal (%rcx,%r9), %r10d movl %r10d, (%rsi,%r9,4) movl %r8d, (%rdi,%r9,4) incq %r9 decl %r8d cmpq $4, %r9 jne .LBB3_2 # %bb.3: # in Loop: Header=BB3_1 Depth=1 incq %rdx addl $-4, %eax addq $4, %rcx addq $16, %rdi addq $16, %rsi cmpq $4, %rdx jne .LBB3_1 # %bb.4: retq .Lfunc_end3: .size _Z7initvetPiS_, .Lfunc_end3-_Z7initvetPiS_ .cfi_endproc # -- End function .globl _Z8printMatPi # -- Begin function _Z8printMatPi .p2align 4, 0x90 .type _Z8printMatPi,@function _Z8printMatPi: # @_Z8printMatPi .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %rbx .cfi_def_cfa_offset 40 pushq %rax .cfi_def_cfa_offset 48 .cfi_offset %rbx, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset 
%rbp, -16 movq %rdi, %rbx xorl %ebp, %ebp .p2align 4, 0x90 .LBB4_1: # =>This Inner Loop Header: Depth=1 movl $.L.str, %edi movl %ebp, %esi xorl %eax, %eax callq printf incl %ebp cmpl $4, %ebp jne .LBB4_1 # %bb.2: movl $10, %edi callq putchar@PLT xorl %r14d, %r14d .p2align 4, 0x90 .LBB4_3: # =>This Loop Header: Depth=1 # Child Loop BB4_4 Depth 2 movl $.L.str.2, %edi movl %r14d, %esi xorl %eax, %eax callq printf xorl %r15d, %r15d .p2align 4, 0x90 .LBB4_4: # Parent Loop BB4_3 Depth=1 # => This Inner Loop Header: Depth=2 movl (%rbx,%r15,4), %esi movl $.L.str.3, %edi xorl %eax, %eax callq printf incq %r15 cmpq $4, %r15 jne .LBB4_4 # %bb.5: # in Loop: Header=BB4_3 Depth=1 movl $10, %edi callq putchar@PLT incq %r14 addq $16, %rbx cmpq $4, %r14 jne .LBB4_3 # %bb.6: addq $8, %rsp .cfi_def_cfa_offset 40 popq %rbx .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end4: .size _Z8printMatPi, .Lfunc_end4-_Z8printMatPi .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r12 .cfi_def_cfa_offset 40 pushq %rbx .cfi_def_cfa_offset 48 subq $160, %rsp .cfi_def_cfa_offset 208 .cfi_offset %rbx, -48 .cfi_offset %r12, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movabsq $4294967297, %rbx # imm = 0x100000001 leaq 48(%rsp), %rdi movl $64, %esi xorl %edx, %edx callq hipHostMalloc leaq 8(%rsp), %rdi movl $64, %esi xorl %edx, %edx callq hipHostMalloc leaq 40(%rsp), %rdi movl $64, %esi xorl %edx, %edx callq hipHostMalloc leaq 32(%rsp), %rdi movl $64, %esi callq hipMalloc leaq 24(%rsp), %rdi movl $64, %esi callq hipMalloc leaq 16(%rsp), %rdi movl $64, %esi callq hipMalloc leaq 3(%rbx), %rdi movl $1, %esi movq %rdi, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq 
__hipPushCallConfiguration testl %eax, %eax jne .LBB5_2 # %bb.1: movq 32(%rsp), %rax movq 24(%rsp), %rcx movq 16(%rsp), %rdx movq %rax, 72(%rsp) movq %rcx, 96(%rsp) movq %rdx, 120(%rsp) leaq 72(%rsp), %rax movq %rax, 128(%rsp) leaq 96(%rsp), %rax movq %rax, 136(%rsp) leaq 120(%rsp), %rax movq %rax, 144(%rsp) leaq 56(%rsp), %rdi leaq 80(%rsp), %rsi leaq 112(%rsp), %rdx leaq 104(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 80(%rsp), %rcx movl 88(%rsp), %r8d leaq 128(%rsp), %r9 movl $_Z8dirtyMemPiS_S_, %edi pushq 104(%rsp) .cfi_adjust_cfa_offset 8 pushq 120(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB5_2: movq 48(%rsp), %rdi movq 32(%rsp), %rsi movl $64, %edx movl $2, %ecx callq hipMemcpy movq 8(%rsp), %rdi movq 24(%rsp), %rsi movl $64, %edx movl $2, %ecx callq hipMemcpy movq 40(%rsp), %rdi movq 16(%rsp), %rsi movl $64, %edx movl $2, %ecx callq hipMemcpy movl $.Lstr, %edi callq puts@PLT movl $.Lstr.5, %edi callq puts@PLT movq 48(%rsp), %r15 xorl %ebp, %ebp .p2align 4, 0x90 .LBB5_3: # =>This Inner Loop Header: Depth=1 movl $.L.str, %edi movl %ebp, %esi xorl %eax, %eax callq printf incl %ebp cmpl $4, %ebp jne .LBB5_3 # %bb.4: movl $10, %edi callq putchar@PLT xorl %r14d, %r14d .p2align 4, 0x90 .LBB5_5: # =>This Loop Header: Depth=1 # Child Loop BB5_6 Depth 2 movl $.L.str.2, %edi movl %r14d, %esi xorl %eax, %eax callq printf xorl %r12d, %r12d .p2align 4, 0x90 .LBB5_6: # Parent Loop BB5_5 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r15,%r12,4), %esi movl $.L.str.3, %edi xorl %eax, %eax callq printf incq %r12 cmpq $4, %r12 jne .LBB5_6 # %bb.7: # in Loop: Header=BB5_5 Depth=1 movl $10, %edi callq putchar@PLT incq %r14 addq $16, %r15 cmpq $4, %r14 jne .LBB5_5 # %bb.8: # %_Z8printMatPi.exit movl $.Lstr.6, %edi callq puts@PLT movq 8(%rsp), %r15 xorl %ebp, %ebp .p2align 4, 0x90 .LBB5_9: # =>This Inner Loop Header: Depth=1 movl $.L.str, %edi movl %ebp, %esi xorl %eax, %eax 
callq printf incl %ebp cmpl $4, %ebp jne .LBB5_9 # %bb.10: movl $10, %edi callq putchar@PLT xorl %r14d, %r14d .p2align 4, 0x90 .LBB5_11: # =>This Loop Header: Depth=1 # Child Loop BB5_12 Depth 2 movl $.L.str.2, %edi movl %r14d, %esi xorl %eax, %eax callq printf xorl %r12d, %r12d .p2align 4, 0x90 .LBB5_12: # Parent Loop BB5_11 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r15,%r12,4), %esi movl $.L.str.3, %edi xorl %eax, %eax callq printf incq %r12 cmpq $4, %r12 jne .LBB5_12 # %bb.13: # in Loop: Header=BB5_11 Depth=1 movl $10, %edi callq putchar@PLT incq %r14 addq $16, %r15 cmpq $4, %r14 jne .LBB5_11 # %bb.14: # %_Z8printMatPi.exit49 movl $.Lstr.7, %edi callq puts@PLT movq 40(%rsp), %r15 xorl %ebp, %ebp .p2align 4, 0x90 .LBB5_15: # =>This Inner Loop Header: Depth=1 movl $.L.str, %edi movl %ebp, %esi xorl %eax, %eax callq printf incl %ebp cmpl $4, %ebp jne .LBB5_15 # %bb.16: movl $10, %edi callq putchar@PLT xorl %r14d, %r14d .p2align 4, 0x90 .LBB5_17: # =>This Loop Header: Depth=1 # Child Loop BB5_18 Depth 2 movl $.L.str.2, %edi movl %r14d, %esi xorl %eax, %eax callq printf xorl %r12d, %r12d .p2align 4, 0x90 .LBB5_18: # Parent Loop BB5_17 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r15,%r12,4), %esi movl $.L.str.3, %edi xorl %eax, %eax callq printf incq %r12 cmpq $4, %r12 jne .LBB5_18 # %bb.19: # in Loop: Header=BB5_17 Depth=1 movl $10, %edi callq putchar@PLT incq %r14 addq $16, %r15 cmpq $4, %r14 jne .LBB5_17 # %bb.20: # %_Z8printMatPi.exit60 movq 48(%rsp), %rsi movl $16, %eax movq 8(%rsp), %rcx xorl %edx, %edx xorl %edi, %edi .p2align 4, 0x90 .LBB5_21: # %.preheader.i # =>This Loop Header: Depth=1 # Child Loop BB5_22 Depth 2 movq %rdx, %r8 xorl %r9d, %r9d .p2align 4, 0x90 .LBB5_22: # Parent Loop BB5_21 Depth=1 # => This Inner Loop Header: Depth=2 movl %r8d, (%rcx,%r8,4) leal (%rax,%r9), %r10d movl %r10d, (%rsi,%r8,4) decq %r9 incq %r8 cmpq $-4, %r9 jne .LBB5_22 # %bb.23: # in Loop: Header=BB5_21 Depth=1 incq %rdi addq $4, %rdx addq $-4, %rax cmpq $4, 
%rdi jne .LBB5_21 # %bb.24: # %_Z7initvetPiS_.exit movabsq $17179869188, %r14 # imm = 0x400000004 movq 32(%rsp), %rdi movl $64, %edx movl $1, %ecx callq hipMemcpy movq 24(%rsp), %rdi movq 8(%rsp), %rsi movl $64, %edx movl $1, %ecx callq hipMemcpy movq %rbx, %rdi movl $1, %esi movq %r14, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB5_26 # %bb.25: leaq 128(%rsp), %rdi leaq 56(%rsp), %rsi leaq 80(%rsp), %rdx leaq 72(%rsp), %rcx callq __hipPopCallConfiguration movq 128(%rsp), %rsi movl 136(%rsp), %edx movq 56(%rsp), %rcx movl 64(%rsp), %r8d leaq 96(%rsp), %r9 movl $_Z10printIndexv, %edi pushq 72(%rsp) .cfi_adjust_cfa_offset 8 pushq 88(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB5_26: movq %rbx, %rdi movl $1, %esi movq %r14, %rdx movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB5_28 # %bb.27: movq 32(%rsp), %rax movq 24(%rsp), %rcx movq 16(%rsp), %rdx movq %rax, 72(%rsp) movq %rcx, 96(%rsp) movq %rdx, 120(%rsp) leaq 72(%rsp), %rax movq %rax, 128(%rsp) leaq 96(%rsp), %rax movq %rax, 136(%rsp) leaq 120(%rsp), %rax movq %rax, 144(%rsp) leaq 56(%rsp), %rdi leaq 80(%rsp), %rsi leaq 112(%rsp), %rdx leaq 104(%rsp), %rcx callq __hipPopCallConfiguration movq 56(%rsp), %rsi movl 64(%rsp), %edx movq 80(%rsp), %rcx movl 88(%rsp), %r8d leaq 128(%rsp), %r9 movl $_Z7matMultPiS_S_, %edi pushq 104(%rsp) .cfi_adjust_cfa_offset 8 pushq 120(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB5_28: callq hipDeviceSynchronize movq 40(%rsp), %rdi movq 16(%rsp), %rsi movl $64, %edx movl $2, %ecx callq hipMemcpy movl $.Lstr.4, %edi callq puts@PLT movl $.Lstr.5, %edi callq puts@PLT movq 48(%rsp), %r14 xorl %ebx, %ebx .p2align 4, 0x90 .LBB5_29: # =>This Inner Loop Header: Depth=1 movl $.L.str, %edi movl %ebx, %esi xorl %eax, %eax callq printf incl %ebx cmpl $4, %ebx jne .LBB5_29 
# %bb.30: movl $10, %edi callq putchar@PLT xorl %ebx, %ebx .p2align 4, 0x90 .LBB5_31: # =>This Loop Header: Depth=1 # Child Loop BB5_32 Depth 2 movl $.L.str.2, %edi movl %ebx, %esi xorl %eax, %eax callq printf xorl %r15d, %r15d .p2align 4, 0x90 .LBB5_32: # Parent Loop BB5_31 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r14,%r15,4), %esi movl $.L.str.3, %edi xorl %eax, %eax callq printf incq %r15 cmpq $4, %r15 jne .LBB5_32 # %bb.33: # in Loop: Header=BB5_31 Depth=1 movl $10, %edi callq putchar@PLT incq %rbx addq $16, %r14 cmpq $4, %rbx jne .LBB5_31 # %bb.34: # %_Z8printMatPi.exit86 movl $.Lstr.6, %edi callq puts@PLT movq 8(%rsp), %r14 xorl %ebx, %ebx .p2align 4, 0x90 .LBB5_35: # =>This Inner Loop Header: Depth=1 movl $.L.str, %edi movl %ebx, %esi xorl %eax, %eax callq printf incl %ebx cmpl $4, %ebx jne .LBB5_35 # %bb.36: movl $10, %edi callq putchar@PLT xorl %ebx, %ebx .p2align 4, 0x90 .LBB5_37: # =>This Loop Header: Depth=1 # Child Loop BB5_38 Depth 2 movl $.L.str.2, %edi movl %ebx, %esi xorl %eax, %eax callq printf xorl %r15d, %r15d .p2align 4, 0x90 .LBB5_38: # Parent Loop BB5_37 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r14,%r15,4), %esi movl $.L.str.3, %edi xorl %eax, %eax callq printf incq %r15 cmpq $4, %r15 jne .LBB5_38 # %bb.39: # in Loop: Header=BB5_37 Depth=1 movl $10, %edi callq putchar@PLT incq %rbx addq $16, %r14 cmpq $4, %rbx jne .LBB5_37 # %bb.40: # %_Z8printMatPi.exit97 movl $.Lstr.7, %edi callq puts@PLT movq 40(%rsp), %r14 xorl %ebx, %ebx .p2align 4, 0x90 .LBB5_41: # =>This Inner Loop Header: Depth=1 movl $.L.str, %edi movl %ebx, %esi xorl %eax, %eax callq printf incl %ebx cmpl $4, %ebx jne .LBB5_41 # %bb.42: movl $10, %edi callq putchar@PLT xorl %ebx, %ebx .p2align 4, 0x90 .LBB5_43: # =>This Loop Header: Depth=1 # Child Loop BB5_44 Depth 2 movl $.L.str.2, %edi movl %ebx, %esi xorl %eax, %eax callq printf xorl %r15d, %r15d .p2align 4, 0x90 .LBB5_44: # Parent Loop BB5_43 Depth=1 # => This Inner Loop Header: Depth=2 movl (%r14,%r15,4), 
%esi movl $.L.str.3, %edi xorl %eax, %eax callq printf incq %r15 cmpq $4, %r15 jne .LBB5_44 # %bb.45: # in Loop: Header=BB5_43 Depth=1 movl $10, %edi callq putchar@PLT incq %rbx addq $16, %r14 cmpq $4, %rbx jne .LBB5_43 # %bb.46: # %_Z8printMatPi.exit108 movq 32(%rsp), %rdi callq hipFree movq 24(%rsp), %rdi callq hipFree movq 16(%rsp), %rdi callq hipFree movq 48(%rsp), %rdi callq hipHostFree movq 8(%rsp), %rdi callq hipHostFree movq 40(%rsp), %rdi callq hipHostFree xorl %eax, %eax addq $160, %rsp .cfi_def_cfa_offset 48 popq %rbx .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .Lfunc_end5: .size main, .Lfunc_end5-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB6_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB6_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z7matMultPiS_S_, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z10printIndexv, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z8dirtyMemPiS_S_, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL 
.Lfunc_end6: .size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB7_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB7_2: retq .Lfunc_end7: .size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor .cfi_endproc # -- End function .type _Z7matMultPiS_S_,@object # @_Z7matMultPiS_S_ .section .rodata,"a",@progbits .globl _Z7matMultPiS_S_ .p2align 3, 0x0 _Z7matMultPiS_S_: .quad _Z22__device_stub__matMultPiS_S_ .size _Z7matMultPiS_S_, 8 .type _Z10printIndexv,@object # @_Z10printIndexv .globl _Z10printIndexv .p2align 3, 0x0 _Z10printIndexv: .quad _Z25__device_stub__printIndexv .size _Z10printIndexv, 8 .type _Z8dirtyMemPiS_S_,@object # @_Z8dirtyMemPiS_S_ .globl _Z8dirtyMemPiS_S_ .p2align 3, 0x0 _Z8dirtyMemPiS_S_: .quad _Z23__device_stub__dirtyMemPiS_S_ .size _Z8dirtyMemPiS_S_, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "\t(%d)" .size .L.str, 6 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "(%d)" .size .L.str.2, 5 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "\t%d" .size .L.str.3, 4 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z7matMultPiS_S_" .size .L__unnamed_1, 17 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z10printIndexv" .size .L__unnamed_2, 16 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z8dirtyMemPiS_S_" .size .L__unnamed_3, 18 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local 
__hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "\t ### Valores Inicializados na GPU ###" .size .Lstr, 39 .type .Lstr.4,@object # @str.4 .Lstr.4: .asciz "\t ### Valores ap\303\263s processamento em GPU ###" .size .Lstr.4, 45 .type .Lstr.5,@object # @str.5 .Lstr.5: .asciz "\t ### Matriz (a) ### " .size .Lstr.5, 22 .type .Lstr.6,@object # @str.6 .Lstr.6: .asciz "\t ### Matriz (b) ### " .size .Lstr.6, 22 .type .Lstr.7,@object # @str.7 .Lstr.7: .asciz "\t ### Matriz (c) ### " .size .Lstr.7, 22 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z22__device_stub__matMultPiS_S_ .addrsig_sym _Z25__device_stub__printIndexv .addrsig_sym _Z23__device_stub__dirtyMemPiS_S_ .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z7matMultPiS_S_ .addrsig_sym _Z10printIndexv .addrsig_sym _Z8dirtyMemPiS_S_ .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <cstdlib>
#include <iostream>
#include <memory>

// Abort with file:line context when a CUDA runtime call fails.
// Kernel launches are checked separately via cudaGetLastError().
#define CUDA_CHECK(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            std::cerr << "CUDA error " << __FILE__ << ":" << __LINE__        \
                      << ": " << cudaGetErrorString(err_) << std::endl;      \
            std::abort();                                                    \
        }                                                                    \
    } while (0)

// Writes tensor[i] = i for every element of a linear (1-D) tensor.
// Threads past tensor_size are masked out by the bounds check, so any
// launch that covers tensor_size is valid.
__global__ void tensor_1d_assign(float *tensor, size_t tensor_size) {
    size_t i = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (i < tensor_size) {
        tensor[i] = (float)i;
    }
}

// Fills a 1 Mi-element device buffer on the GPU, copies it back, and
// verifies host-side that element i holds the value i.
void tensor_1d_test() {
    constexpr size_t kCount = 1024 * 1024;

    float *device_ptr = nullptr;
    CUDA_CHECK(cudaMalloc(&device_ptr, sizeof(float) * kCount));

    const int block_dim = 256;
    const int grid_dim = static_cast<int>(kCount / block_dim);
    tensor_1d_assign<<<grid_dim, block_dim>>>(device_ptr, kCount);
    CUDA_CHECK(cudaGetLastError());

    auto host_ptr = std::unique_ptr<float[]>(new float[kCount]);
    // cudaMemcpy on the default stream is blocking, so no explicit
    // synchronize is required before reading the result.
    CUDA_CHECK(cudaMemcpy(host_ptr.get(), device_ptr, sizeof(float) * kCount,
                          cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaFree(device_ptr));

    bool is_ok = true;
    for (size_t i = 0; i < kCount; ++i) {
        // Exact float compare is safe here: every index < 2^24 is
        // exactly representable as a float.
        if (host_ptr[i] != static_cast<float>(i)) {
            is_ok = false;
            std::cout << "host_ptr[" << i << "] = " << host_ptr[i]
                      << std::endl;
            break;
        }
    }
    std::cout << (is_ok ? "ok" : "wrong") << std::endl;
}

// Row-major 2-D fill on a pitched allocation: row i, column j receives
// the value i * width + j.  i indexes rows (height), j columns (width);
// rows are `pitch` bytes apart, hence the char* arithmetic.
__global__ void tensor_2d_assign(float *tensor, int width, int height,
                                 size_t pitch) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;  // row
    int j = blockDim.y * blockIdx.y + threadIdx.y;  // column
    if (!(i < height && j < width)) {
        return;
    }
    float *tensor_row = reinterpret_cast<float *>(
        reinterpret_cast<char *>(tensor) + i * pitch);
    tensor_row[j] = i * width + j;
}

// Fills a 1024x4096 pitched 2-D buffer on the GPU and verifies the
// row-major sequence 0,1,2,... after a cudaMemcpy2D back to the host.
void tensor_2d_test() {
    const int width = 4096;
    const int height = 1024;

    size_t pitch = 0u;
    float *device_ptr = nullptr;
    CUDA_CHECK(
        cudaMallocPitch(&device_ptr, &pitch, width * sizeof(float), height));

    // Launch geometry: x covers rows (height), y covers columns (width),
    // matching the kernel's index mapping above.
    dim3 block_dim(16, 16);
    dim3 grid_dim(height / 16, width / 16);
    tensor_2d_assign<<<grid_dim, block_dim>>>(device_ptr, width, height,
                                              pitch);
    CUDA_CHECK(cudaGetLastError());

    auto host_ptr = std::unique_ptr<float[]>(new float[width * height]);
    // cudaMemcpy2D un-pitches the data into a dense width-stride buffer.
    CUDA_CHECK(cudaMemcpy2D(host_ptr.get(), width * sizeof(float), device_ptr,
                            pitch, width * sizeof(float), height,
                            cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaFree(device_ptr));

    bool is_ok = true;
    int value = 0;
    for (int i = 0; i < height && is_ok; ++i) {
        for (int j = 0; j < width; ++j, ++value) {
            if (host_ptr[width * i + j] != value) {
                // BUG FIX: the original never cleared is_ok here, so the
                // test printed "ok" even after detecting a mismatch.
                is_ok = false;
                std::cout << host_ptr[width * i + j] << std::endl;
                break;
            }
        }
    }
    std::cout << (is_ok ? "ok" : "wrong") << std::endl;
}

// 3-D fill on a cudaMalloc3D pitched allocation.  Grid/block x covers
// depth, y covers height, z covers width; element (d, h, w) receives
// d*height*width + h*width + w.
__global__ void tensor_3d_assign(cudaPitchedPtr pitched_ptr, int depth,
                                 int height, int width) {
    const size_t pitch = pitched_ptr.pitch;
    const size_t slice_pitch = pitch * height;  // bytes per depth slice

    int d_idx = blockDim.x * blockIdx.x + threadIdx.x;
    int h_idx = blockDim.y * blockIdx.y + threadIdx.y;
    int w_idx = blockDim.z * blockIdx.z + threadIdx.z;

    // NOTE: the launch in tensor_3d_test covers the extent exactly, so no
    // bounds check is performed; any other launch shape would need one.
    char *slice_ptr =
        reinterpret_cast<char *>(pitched_ptr.ptr) + d_idx * slice_pitch;
    int *row = reinterpret_cast<int *>(slice_ptr + h_idx * pitch);
    row[w_idx] = d_idx * height * width + h_idx * width + w_idx;
}

// Fills a 32x64x128 int volume on the GPU via cudaMalloc3D, copies it
// back with cudaMemcpy3D, and verifies the linear sequence 0,1,2,...
void tensor_3d_test() {
    const int depth = 32;
    const int height = 64;
    const int width = 128;

    // extent.width is in BYTES for non-array allocations.
    cudaExtent extent = make_cudaExtent(width * sizeof(int), height, depth);
    cudaPitchedPtr pitched_ptr;
    CUDA_CHECK(cudaMalloc3D(&pitched_ptr, extent));

    dim3 block_dim(4, 4, 4);
    dim3 grid_dim(depth / 4, height / 4, width / 4);
    tensor_3d_assign<<<grid_dim, block_dim>>>(pitched_ptr, depth, height,
                                              width);
    CUDA_CHECK(cudaGetLastError());

    // Keep the 1 MiB destination buffer off the stack.
    auto host_ptr = std::unique_ptr<int[]>(new int[depth * height * width]);

    // BUG FIX: zero-initialize the params struct.  The original left
    // srcPos/dstPos/srcArray/dstArray uninitialized, which is undefined
    // behavior and can make cudaMemcpy3D fail or copy garbage.
    cudaMemcpy3DParms memcpy_params = {};
    memcpy_params.srcPtr = pitched_ptr;
    memcpy_params.dstPtr.ptr = host_ptr.get();
    memcpy_params.dstPtr.pitch = width * sizeof(int);  // dense host rows
    memcpy_params.dstPtr.xsize = width;
    memcpy_params.dstPtr.ysize = height;
    memcpy_params.kind = cudaMemcpyDeviceToHost;
    memcpy_params.extent.depth = depth;
    memcpy_params.extent.height = height;
    memcpy_params.extent.width = width * sizeof(int);  // bytes
    CUDA_CHECK(cudaMemcpy3D(&memcpy_params));
    CUDA_CHECK(cudaFree(pitched_ptr.ptr));

    bool is_ok = true;
    int value = 0;
    for (int d = 0; d < depth && is_ok; ++d) {
        for (int h = 0; h < height && is_ok; ++h) {
            for (int w = 0; w < width; ++w, ++value) {
                int got = host_ptr[(d * height + h) * width + w];
                if (got != value) {
                    std::cout << "wrong result. host_ptr[" << d << "][" << h
                              << "][" << w << "] = " << got
                              << ",, value = " << value << std::endl;
                    is_ok = false;
                    break;
                }
            }
        }
    }
    std::cout << (is_ok ? "ok" : "wrong") << std::endl;
}

int main() {
    // tensor_1d_test();
    // tensor_2d_test();
    tensor_3d_test();
    return 0;
}
code for sm_80 Function : _Z16tensor_3d_assign14cudaPitchedPtriii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */ /* 0x000e220000002600 */ /*0020*/ MOV R7, c[0x0][0x168] ; /* 0x00005a0000077a02 */ /* 0x000fe20000000f00 */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */ /* 0x000e280000002200 */ /*0050*/ S2R R9, SR_CTAID.X ; /* 0x0000000000097919 */ /* 0x000e680000002500 */ /*0060*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e680000002100 */ /*0070*/ S2R R8, SR_CTAID.Z ; /* 0x0000000000087919 */ /* 0x000ea80000002700 */ /*0080*/ S2R R11, SR_TID.Z ; /* 0x00000000000b7919 */ /* 0x000ea20000002300 */ /*0090*/ IMAD R2, R2, c[0x0][0x4], R3 ; /* 0x0000010002027a24 */ /* 0x001fca00078e0203 */ /*00a0*/ SHF.R.S32.HI R3, RZ, 0x1f, R2 ; /* 0x0000001fff037819 */ /* 0x000fe20000011402 */ /*00b0*/ IMAD R9, R9, c[0x0][0x0], R0 ; /* 0x0000000009097a24 */ /* 0x002fc800078e0200 */ /*00c0*/ IMAD.WIDE R4, R9, c[0x0][0x184], R2 ; /* 0x0000610009047a25 */ /* 0x000fc800078e0202 */ /*00d0*/ IMAD R3, R5, c[0x0][0x168], RZ ; /* 0x00005a0005037a24 */ /* 0x000fe400078e02ff */ /*00e0*/ IMAD.WIDE.U32 R6, R4, R7, c[0x0][0x160] ; /* 0x0000580004067625 */ /* 0x000fc800078e0007 */ /*00f0*/ IMAD R5, R4, c[0x0][0x16c], R3 ; /* 0x00005b0004057a24 */ /* 0x000fe400078e0203 */ /*0100*/ IMAD R3, R8, c[0x0][0x8], R11 ; /* 0x0000020008037a24 */ /* 0x004fe400078e020b */ /*0110*/ IMAD R2, R9, c[0x0][0x184], R2 ; /* 0x0000610009027a24 */ /* 0x000fe200078e0202 */ /*0120*/ IADD3 R7, R7, R5, RZ ; /* 0x0000000507077210 */ /* 0x000fc60007ffe0ff */ /*0130*/ IMAD R5, R2, c[0x0][0x188], R3 ; /* 0x0000620002057a24 */ /* 0x000fe400078e0203 */ /*0140*/ IMAD.WIDE R2, R3, 0x4, R6 ; /* 0x0000000403027825 */ /* 0x000fca00078e0206 */ 
/*0150*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*0160*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0170*/ BRA 0x170; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z16tensor_2d_assignPfiim .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e280000002100 */ /*0030*/ S2R R5, SR_CTAID.Y ; /* 0x0000000000057919 */ /* 0x000e680000002600 */ /*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e620000002200 */ /*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x16c], PT ; /* 0x00005b0000007a0c */ /* 0x000fe20003f06270 */ /*0070*/ IMAD R5, R5, c[0x0][0x4], R2 ; /* 0x0000010005057a24 */ /* 0x002fca00078e0202 */ /*0080*/ ISETP.GE.OR P0, PT, R5, c[0x0][0x168], P0 ; /* 0x00005a0005007a0c */ /* 0x000fda0000706670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ SHF.R.S32.HI R2, RZ, 0x1f, R0 ; /* 0x0000001fff027819 */ /* 0x000fe20000011400 */ /*00b0*/ IMAD R4, R0, c[0x0][0x168], R5 ; /* 0x00005a0000047a24 */ /* 0x000fe200078e0205 */ /*00c0*/ MOV R3, c[0x0][0x170] ; /* 0x00005c0000037a02 */ /* 
0x000fe20000000f00 */ /*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*00e0*/ IMAD R9, R2, c[0x0][0x170], RZ ; /* 0x00005c0002097a24 */ /* 0x000fe200078e02ff */ /*00f0*/ I2F R7, R4 ; /* 0x0000000400077306 */ /* 0x000e220000201400 */ /*0100*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fc800078e0003 */ /*0110*/ IMAD R9, R0, c[0x0][0x174], R9 ; /* 0x00005d0000097a24 */ /* 0x000fca00078e0209 */ /*0120*/ IADD3 R3, R3, R9, RZ ; /* 0x0000000903037210 */ /* 0x000fca0007ffe0ff */ /*0130*/ IMAD.WIDE R2, R5, 0x4, R2 ; /* 0x0000000405027825 */ /* 0x000fca00078e0202 */ /*0140*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x001fe2000c101904 */ /*0150*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0160*/ BRA 0x160; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z16tensor_1d_assignPfm .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x168], PT ; /* 0x00005a0000007a0c */ /* 0x000fe40003f06070 */ /*0050*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */ /* 0x000fc80000011400 */ /*0060*/ ISETP.GE.U32.AND.EX P0, PT, R3, c[0x0][0x16c], PT, P0 ; /* 0x00005b0003007a0c */ /* 0x000fda0003f06100 */ /*0070*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0080*/ I2F R5, R0 ; /* 0x0000000000057306 */ /* 0x000e220000201400 */ /*0090*/ LEA R2, P0, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000027a11 */ /* 0x000fe200078010ff */ /*00a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*00b0*/ LEA.HI.X R3, R0, c[0x0][0x164], R3, 0x2, P0 ; /* 0x0000590000037a11 */ /* 0x000fca00000f1403 */ /*00c0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x001fe2000c101904 */ /*00d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ 
/* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <iostream> #include <memory> __global__ void tensor_1d_assign(float *tensor, size_t tensor_size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < tensor_size) { tensor[i] = i; } } void tensor_1d_test() { float *device_ptr; cudaMalloc(&device_ptr, sizeof(float) * 1024 * 1024); const int block_dim = 256; const int grid_dim = 1024 * 1024 / 256; tensor_1d_assign<<<grid_dim, block_dim>>>(device_ptr, 1024 * 1024); auto host_ptr = std::unique_ptr<float[]>(new float[1024 * 1024]); cudaMemcpy(host_ptr.get(), device_ptr, sizeof(float) * 1024 * 1024, cudaMemcpyDeviceToHost); cudaFree(device_ptr); bool is_ok = true; for (auto i = 0; i < 1024 * 1024; ++i) { if (host_ptr[i] != i) { is_ok = false; std::cout << "host_ptr[" << i << "] = " << host_ptr[i] << std::endl; break; } } if (is_ok) { std::cout << "ok" << std::endl; } else { std::cout << "wrong" << std::endl; } } __global__ void tensor_2d_assign(float *tensor, int width, int height, size_t pitch) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (!(i < height && j < width)) { return; } float *tensor_row = reinterpret_cast<float *>(reinterpret_cast<char *>(tensor) + i * pitch); tensor_row[j] = i * width + j; } void tensor_2d_test() { const int width = 4096; const int height = 1024; size_t pitch = 0u; float *device_ptr; cudaMallocPitch(&device_ptr, &pitch, width * sizeof(float), height); dim3 block_dim(16, 16); dim3 grid_dim(1024 / 16, 4096 / 16); tensor_2d_assign<<<grid_dim, block_dim>>>(device_ptr, width, height, pitch); auto host_ptr = std::unique_ptr<float[]>(new float[width * height]); cudaMemcpy2D(host_ptr.get(), width * sizeof(float), device_ptr, pitch, width * sizeof(float), height, cudaMemcpyDeviceToHost); cudaFree(device_ptr); bool is_ok = true; auto value = 0; for (auto i = 0; i < height; ++i) { for (auto j = 0; j < width; ++j, ++value) { if (host_ptr[width * i + j] != value) { std::cout << host_ptr[width * i + j] << std::endl; break; } } if (!is_ok) { 
break; } } if (is_ok) { std::cout << "ok" << std::endl; } else { std::cout << "wrong" << std::endl; } } __global__ void tensor_3d_assign(cudaPitchedPtr pitched_ptr, int depth, int height, int width) { const size_t pitch = pitched_ptr.pitch; const size_t slice_pitch = pitch * height; int d_idx = blockDim.x * blockIdx.x + threadIdx.x; int h_idx = blockDim.y * blockIdx.y + threadIdx.y; int w_idx = blockDim.z * blockIdx.z + threadIdx.z; char *slice_ptr = reinterpret_cast<char *>(pitched_ptr.ptr) + d_idx * slice_pitch; int *row = reinterpret_cast<int *>(slice_ptr + h_idx * pitch); row[w_idx] = d_idx * height * width + h_idx * width + w_idx; } void tensor_3d_test() { const int depth = 32; const int height = 64; const int width = 128; cudaExtent extent = make_cudaExtent(width * sizeof(int), height, depth); cudaPitchedPtr pitched_ptr; cudaMalloc3D(&pitched_ptr, extent); dim3 block_dim(4, 4, 4); dim3 grid_dim(depth / 4, height / 4, width / 4); tensor_3d_assign<<<grid_dim, block_dim>>>(pitched_ptr, depth, height, width); int host_ptr[32][64][128]; cudaMemcpy3DParms memcpy_params; memcpy_params.srcPtr = pitched_ptr; memcpy_params.dstPtr.ptr = host_ptr; memcpy_params.dstPtr.pitch = width * sizeof(int); memcpy_params.dstPtr.xsize = width; memcpy_params.dstPtr.ysize = height; memcpy_params.kind = cudaMemcpyDeviceToHost; memcpy_params.extent.depth = depth; memcpy_params.extent.height = height; memcpy_params.extent.width = width * sizeof(int); cudaMemcpy3D(&memcpy_params); cudaFree(pitched_ptr.ptr); bool is_ok = true; int value = 0; for (int d = 0; d < depth; ++d) { for (int h = 0; h < height; ++h) { for (int w = 0; w < width; ++w, ++value) { if (host_ptr[d][h][w] != value) { std::cout << "wrong result. 
host_ptr[" << d << "][" << h << "][" << w << "] = " << host_ptr[d][h][w] << ",, value = " << value << std::endl; is_ok = false; break; } } if (!is_ok) break; } if (!is_ok) break; } if (is_ok) { std::cout << "ok" << std::endl; } else { std::cout << "wrong" << std::endl; } } int main() { // tensor_1d_test(); // tensor_2d_test(); tensor_3d_test(); return 0; }
.file "tmpxft_000a254f_00000000-6_chap3.cudafe1.cpp" .text #APP .globl _ZSt21ios_base_library_initv #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB4327: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4327: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z37__device_stub__Z16tensor_1d_assignPfmPfm .type _Z37__device_stub__Z16tensor_1d_assignPfmPfm, @function _Z37__device_stub__Z16tensor_1d_assignPfmPfm: .LFB4349: .cfi_startproc endbr64 subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 8(%rsp) movq %rsi, (%rsp) movq %fs:40, %rax movq %rax, 104(%rsp) xorl %eax, %eax leaq 8(%rsp), %rax movq %rax, 80(%rsp) movq %rsp, %rax movq %rax, 88(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 104(%rsp), %rax subq %fs:40, %rax jne .L8 addq $120, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 136 pushq 24(%rsp) .cfi_def_cfa_offset 144 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z16tensor_1d_assignPfm(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 128 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE4349: .size _Z37__device_stub__Z16tensor_1d_assignPfmPfm, .-_Z37__device_stub__Z16tensor_1d_assignPfmPfm .globl _Z16tensor_1d_assignPfm .type _Z16tensor_1d_assignPfm, @function _Z16tensor_1d_assignPfm: .LFB4350: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z37__device_stub__Z16tensor_1d_assignPfmPfm addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4350: .size 
_Z16tensor_1d_assignPfm, .-_Z16tensor_1d_assignPfm .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "host_ptr[" .LC1: .string "] = " .LC2: .string "wrong" .LC3: .string "ok" .text .globl _Z14tensor_1d_testv .type _Z14tensor_1d_testv, @function _Z14tensor_1d_testv: .LFB4313: .cfi_startproc .cfi_personality 0x9b,DW.ref.__gxx_personality_v0 .cfi_lsda 0x1b,.LLSDA4313 endbr64 pushq %r12 .cfi_def_cfa_offset 16 .cfi_offset 12, -16 pushq %rbp .cfi_def_cfa_offset 24 .cfi_offset 6, -24 pushq %rbx .cfi_def_cfa_offset 32 .cfi_offset 3, -32 subq $48, %rsp .cfi_def_cfa_offset 80 movq %fs:40, %rax movq %rax, 40(%rsp) xorl %eax, %eax leaq 8(%rsp), %rdi movl $4194304, %esi .LEHB0: call cudaMalloc@PLT movl $256, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $4096, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $0, %r9d movl $0, %r8d movq 28(%rsp), %rdx movl $1, %ecx movq 16(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L39 .L12: movl $4194304, %edi call _Znam@PLT .LEHE0: movq %rax, %r12 movl $2, %ecx movl $4194304, %edx movq 8(%rsp), %rsi movq %rax, %rdi .LEHB1: call cudaMemcpy@PLT .LEHE1: jmp .L40 .L39: movl $1048576, %esi movq 8(%rsp), %rdi .LEHB2: call _Z37__device_stub__Z16tensor_1d_assignPfmPfm .LEHE2: jmp .L12 .L40: movq 8(%rsp), %rdi .LEHB3: call cudaFree@PLT movq %r12, %rax movl $0, %ebx .L21: movq %rax, %rbp pxor %xmm0, %xmm0 cvtsi2ssl %ebx, %xmm0 ucomiss (%rax), %xmm0 jp .L34 jne .L34 addl $1, %ebx addq $4, %rax cmpl $1048576, %ebx jne .L21 movl $2, %edx leaq .LC3(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT jmp .L41 .L34: movl $9, %edx leaq .LC0(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl %ebx, %esi leaq _ZSt4cout(%rip), %rdi call _ZNSolsEi@PLT movq %rax, %rbx movl $4, %edx leaq .LC1(%rip), %rsi movq %rax, %rdi call 
_ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT pxor %xmm0, %xmm0 cvtss2sd 0(%rbp), %xmm0 movq %rbx, %rdi call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rbx movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbp testq %rbp, %rbp je .L42 cmpb $0, 56(%rbp) je .L17 movzbl 67(%rbp), %esi .L18: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT jmp .L43 .L42: movq 40(%rsp), %rax subq %fs:40, %rax jne .L44 call _ZSt16__throw_bad_castv@PLT .L33: endbr64 movq %rax, %rbx movq %r12, %rdi call _ZdaPv@PLT movq 40(%rsp), %rax subq %fs:40, %rax je .L31 call __stack_chk_fail@PLT .L44: call __stack_chk_fail@PLT .L17: movq %rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L18 .L43: movq %rax, %rdi call _ZNSo5flushEv@PLT movl $5, %edx leaq .LC2(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax leaq _ZSt4cout(%rip), %rdx movq 240(%rdx,%rax), %rbx testq %rbx, %rbx je .L45 cmpb $0, 56(%rbx) je .L28 movzbl 67(%rbx), %esi .L29: movsbl %sil, %esi leaq _ZSt4cout(%rip), %rdi call _ZNSo3putEc@PLT jmp .L46 .L41: movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax leaq _ZSt4cout(%rip), %rdx movq 240(%rdx,%rax), %rbx testq %rbx, %rbx je .L47 cmpb $0, 56(%rbx) je .L24 movzbl 67(%rbx), %eax .L25: movsbl %al, %esi leaq _ZSt4cout(%rip), %rdi call _ZNSo3putEc@PLT jmp .L48 .L47: movq 40(%rsp), %rax subq %fs:40, %rax jne .L49 call _ZSt16__throw_bad_castv@PLT .L49: call __stack_chk_fail@PLT .L24: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) jmp .L25 .L48: movq %rax, %rdi call _ZNSo5flushEv@PLT jmp .L26 .L45: movq 40(%rsp), %rax subq %fs:40, %rax jne .L50 call _ZSt16__throw_bad_castv@PLT .L50: call __stack_chk_fail@PLT .L28: movq %rbx, %rdi call 
_ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) movl %eax, %esi jmp .L29 .L46: movq %rax, %rdi call _ZNSo5flushEv@PLT .LEHE3: .L26: movq %r12, %rdi call _ZdaPv@PLT movq 40(%rsp), %rax subq %fs:40, %rax jne .L51 addq $48, %rsp .cfi_remember_state .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %rbp .cfi_def_cfa_offset 16 popq %r12 .cfi_def_cfa_offset 8 ret .L31: .cfi_restore_state movq %rbx, %rdi .LEHB4: call _Unwind_Resume@PLT .LEHE4: .L51: call __stack_chk_fail@PLT .cfi_endproc .LFE4313: .globl __gxx_personality_v0 .section .gcc_except_table,"a",@progbits .LLSDA4313: .byte 0xff .byte 0xff .byte 0x1 .uleb128 .LLSDACSE4313-.LLSDACSB4313 .LLSDACSB4313: .uleb128 .LEHB0-.LFB4313 .uleb128 .LEHE0-.LEHB0 .uleb128 0 .uleb128 0 .uleb128 .LEHB1-.LFB4313 .uleb128 .LEHE1-.LEHB1 .uleb128 .L33-.LFB4313 .uleb128 0 .uleb128 .LEHB2-.LFB4313 .uleb128 .LEHE2-.LEHB2 .uleb128 0 .uleb128 0 .uleb128 .LEHB3-.LFB4313 .uleb128 .LEHE3-.LEHB3 .uleb128 .L33-.LFB4313 .uleb128 0 .uleb128 .LEHB4-.LFB4313 .uleb128 .LEHE4-.LEHB4 .uleb128 0 .uleb128 0 .LLSDACSE4313: .text .size _Z14tensor_1d_testv, .-_Z14tensor_1d_testv .globl _Z39__device_stub__Z16tensor_2d_assignPfiimPfiim .type _Z39__device_stub__Z16tensor_2d_assignPfiimPfiim, @function _Z39__device_stub__Z16tensor_2d_assignPfiimPfiim: .LFB4351: .cfi_startproc endbr64 subq $152, %rsp .cfi_def_cfa_offset 160 movq %rdi, 24(%rsp) movl %esi, 20(%rsp) movl %edx, 16(%rsp) movq %rcx, 8(%rsp) movq %fs:40, %rax movq %rax, 136(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 16(%rsp), %rax movq %rax, 112(%rsp) leaq 8(%rsp), %rax movq %rax, 120(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L56 .L52: movq 136(%rsp), %rax subq %fs:40, 
%rax jne .L57 addq $152, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L56: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 168 pushq 40(%rsp) .cfi_def_cfa_offset 176 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z16tensor_2d_assignPfiim(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 160 jmp .L52 .L57: call __stack_chk_fail@PLT .cfi_endproc .LFE4351: .size _Z39__device_stub__Z16tensor_2d_assignPfiimPfiim, .-_Z39__device_stub__Z16tensor_2d_assignPfiimPfiim .globl _Z16tensor_2d_assignPfiim .type _Z16tensor_2d_assignPfiim, @function _Z16tensor_2d_assignPfiim: .LFB4352: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z39__device_stub__Z16tensor_2d_assignPfiimPfiim addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4352: .size _Z16tensor_2d_assignPfiim, .-_Z16tensor_2d_assignPfiim .globl _Z14tensor_2d_testv .type _Z14tensor_2d_testv, @function _Z14tensor_2d_testv: .LFB4322: .cfi_startproc .cfi_personality 0x9b,DW.ref.__gxx_personality_v0 .cfi_lsda 0x1b,.LLSDA4322 endbr64 pushq %rbp .cfi_def_cfa_offset 16 .cfi_offset 6, -16 movq %rsp, %rbp .cfi_def_cfa_register 6 pushq %r15 pushq %r14 pushq %r13 pushq %r12 pushq %rbx subq $72, %rsp .cfi_offset 15, -24 .cfi_offset 14, -32 .cfi_offset 13, -40 .cfi_offset 12, -48 .cfi_offset 3, -56 movq %fs:40, %rax movq %rax, -56(%rbp) xorl %eax, %eax movq $0, -96(%rbp) leaq -96(%rbp), %rsi leaq -88(%rbp), %rdi movl $1024, %ecx movl $16384, %edx .LEHB5: call cudaMallocPitch@PLT movl $16, -80(%rbp) movl $16, -76(%rbp) movl $1, -72(%rbp) movl $64, -68(%rbp) movl $256, -64(%rbp) movl $1, -60(%rbp) movl $0, %r9d movl $0, %r8d movq -80(%rbp), %rdx movl $1, %ecx movq -68(%rbp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L84 .L61: movl $16777216, %edi call _Znam@PLT .LEHE5: movq %rax, %rbx movq %rax, -112(%rbp) subq $8, %rsp pushq $2 movl $1024, %r9d movl $16384, %r8d movq -96(%rbp), %rcx movq 
-88(%rbp), %rdx movl $16384, %esi movq %rax, %rdi .LEHB6: .cfi_escape 0x2e,0x10 call cudaMemcpy2D@PLT .LEHE6: jmp .L85 .L84: movq -96(%rbp), %rcx movl $1024, %edx movl $4096, %esi movq -88(%rbp), %rdi .LEHB7: call _Z39__device_stub__Z16tensor_2d_assignPfiimPfiim .LEHE7: jmp .L61 .L85: addq $16, %rsp movq -88(%rbp), %rdi .LEHB8: .cfi_escape 0x2e,0 call cudaFree@PLT movq %rbx, %r13 leaq 16777216(%rbx), %r14 movl $0, %ebx leaq _ZSt4cout(%rip), %r15 jmp .L62 .L80: cvtss2sd %xmm0, %xmm0 movq %r15, %rdi call _ZNSo9_M_insertIdEERSoT_@PLT movq %rax, %rcx movq %rax, -104(%rbp) movq (%rax), %rax movq -24(%rax), %rax movq 240(%rcx,%rax), %r12 testq %r12, %r12 je .L86 cmpb $0, 56(%r12) je .L67 movzbl 67(%r12), %esi .L68: movsbl %sil, %esi movq -104(%rbp), %rdi call _ZNSo3putEc@PLT jmp .L87 .L86: movq -56(%rbp), %rax subq %fs:40, %rax jne .L88 call _ZSt16__throw_bad_castv@PLT .L79: endbr64 movq %rax, %rbx movq -112(%rbp), %rdi call _ZdaPv@PLT movq -56(%rbp), %rax subq %fs:40, %rax je .L77 call __stack_chk_fail@PLT .L88: call __stack_chk_fail@PLT .L67: movq %r12, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%r12), %rax movl $10, %esi movq %r12, %rdi call *48(%rax) movl %eax, %esi jmp .L68 .L87: movq %rax, %rdi call _ZNSo5flushEv@PLT .L69: addq $16384, %r13 cmpq %r14, %r13 je .L71 .L62: leal 4096(%rbx), %edx movq %r13, %rax .L70: movss (%rax), %xmm0 pxor %xmm1, %xmm1 cvtsi2ssl %ebx, %xmm1 ucomiss %xmm1, %xmm0 jp .L80 jne .L80 addl $1, %ebx addq $4, %rax cmpl %edx, %ebx jne .L70 movl %edx, %ebx jmp .L69 .L71: movl $2, %edx leaq .LC3(%rip), %rsi leaq _ZSt4cout(%rip), %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax leaq _ZSt4cout(%rip), %rdx movq 240(%rdx,%rax), %rbx testq %rbx, %rbx je .L89 cmpb $0, 56(%rbx) je .L74 movzbl 67(%rbx), %eax .L75: movsbl %al, %esi leaq _ZSt4cout(%rip), %rdi call _ZNSo3putEc@PLT jmp .L90 .L89: movq -56(%rbp), %rax subq %fs:40, %rax jne .L91 call 
_ZSt16__throw_bad_castv@PLT .L91: call __stack_chk_fail@PLT .L74: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) jmp .L75 .L90: movq %rax, %rdi call _ZNSo5flushEv@PLT .LEHE8: movq -112(%rbp), %rdi call _ZdaPv@PLT movq -56(%rbp), %rax subq %fs:40, %rax jne .L92 leaq -40(%rbp), %rsp popq %rbx popq %r12 popq %r13 popq %r14 popq %r15 popq %rbp .cfi_remember_state .cfi_def_cfa 7, 8 ret .L77: .cfi_restore_state movq %rbx, %rdi .LEHB9: call _Unwind_Resume@PLT .LEHE9: .L92: call __stack_chk_fail@PLT .cfi_endproc .LFE4322: .section .gcc_except_table .LLSDA4322: .byte 0xff .byte 0xff .byte 0x1 .uleb128 .LLSDACSE4322-.LLSDACSB4322 .LLSDACSB4322: .uleb128 .LEHB5-.LFB4322 .uleb128 .LEHE5-.LEHB5 .uleb128 0 .uleb128 0 .uleb128 .LEHB6-.LFB4322 .uleb128 .LEHE6-.LEHB6 .uleb128 .L79-.LFB4322 .uleb128 0 .uleb128 .LEHB7-.LFB4322 .uleb128 .LEHE7-.LEHB7 .uleb128 0 .uleb128 0 .uleb128 .LEHB8-.LFB4322 .uleb128 .LEHE8-.LEHB8 .uleb128 .L79-.LFB4322 .uleb128 0 .uleb128 .LEHB9-.LFB4322 .uleb128 .LEHE9-.LEHB9 .uleb128 0 .uleb128 0 .LLSDACSE4322: .text .size _Z14tensor_2d_testv, .-_Z14tensor_2d_testv .globl _Z53__device_stub__Z16tensor_3d_assign14cudaPitchedPtriiiR14cudaPitchedPtriii .type _Z53__device_stub__Z16tensor_3d_assign14cudaPitchedPtriiiR14cudaPitchedPtriii, @function _Z53__device_stub__Z16tensor_3d_assign14cudaPitchedPtriiiR14cudaPitchedPtriii: .LFB4353: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movl %esi, 12(%rsp) movl %edx, 8(%rsp) movl %ecx, 4(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax movq %rdi, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 8(%rsp), %rax movq %rax, 96(%rsp) leaq 4(%rsp), %rax movq %rax, 104(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) movl $1, 40(%rsp) movl $1, 44(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) leaq 24(%rsp), %rcx leaq 16(%rsp), %rdx leaq 44(%rsp), %rsi leaq 32(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L97 
.L93: movq 120(%rsp), %rax subq %fs:40, %rax jne .L98 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L97: .cfi_restore_state pushq 24(%rsp) .cfi_def_cfa_offset 152 pushq 24(%rsp) .cfi_def_cfa_offset 160 leaq 96(%rsp), %r9 movq 60(%rsp), %rcx movl 68(%rsp), %r8d movq 48(%rsp), %rsi movl 56(%rsp), %edx leaq _Z16tensor_3d_assign14cudaPitchedPtriii(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L93 .L98: call __stack_chk_fail@PLT .cfi_endproc .LFE4353: .size _Z53__device_stub__Z16tensor_3d_assign14cudaPitchedPtriiiR14cudaPitchedPtriii, .-_Z53__device_stub__Z16tensor_3d_assign14cudaPitchedPtriiiR14cudaPitchedPtriii .globl _Z16tensor_3d_assign14cudaPitchedPtriii .type _Z16tensor_3d_assign14cudaPitchedPtriii, @function _Z16tensor_3d_assign14cudaPitchedPtriii: .LFB4354: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movl %edx, %ecx movl %esi, %edx movl %edi, %esi leaq 16(%rsp), %rdi call _Z53__device_stub__Z16tensor_3d_assign14cudaPitchedPtriiiR14cudaPitchedPtriii addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4354: .size _Z16tensor_3d_assign14cudaPitchedPtriii, .-_Z16tensor_3d_assign14cudaPitchedPtriii .section .rodata.str1.1 .LC4: .string "wrong result. 
host_ptr[" .LC5: .string "][" .LC6: .string ",, value = " .text .globl _Z14tensor_3d_testv .type _Z14tensor_3d_testv, @function _Z14tensor_3d_testv: .LFB4323: .cfi_startproc endbr64 pushq %r15 .cfi_def_cfa_offset 16 .cfi_offset 15, -16 pushq %r14 .cfi_def_cfa_offset 24 .cfi_offset 14, -24 pushq %r13 .cfi_def_cfa_offset 32 .cfi_offset 13, -32 pushq %r12 .cfi_def_cfa_offset 40 .cfi_offset 12, -40 pushq %rbp .cfi_def_cfa_offset 48 .cfi_offset 6, -48 pushq %rbx .cfi_def_cfa_offset 56 .cfi_offset 3, -56 leaq -1048576(%rsp), %r11 .cfi_def_cfa 11, 1048632 .LPSRL0: subq $4096, %rsp orq $0, (%rsp) cmpq %r11, %rsp jne .LPSRL0 .cfi_def_cfa_register 7 subq $296, %rsp .cfi_def_cfa_offset 1048928 movq %fs:40, %rax movq %rax, 1048856(%rsp) xorl %eax, %eax movq $512, 48(%rsp) movq $64, 56(%rsp) movq $32, 64(%rsp) leaq 80(%rsp), %rdi subq $32, %rsp .cfi_def_cfa_offset 1048960 movdqa 80(%rsp), %xmm0 movups %xmm0, (%rsp) movq $32, 16(%rsp) call cudaMalloc3D@PLT movl $4, 56(%rsp) movl $4, 60(%rsp) movl $4, 64(%rsp) movl $8, 68(%rsp) movl $16, 72(%rsp) movl $32, 76(%rsp) addq $32, %rsp .cfi_def_cfa_offset 1048928 movl $0, %r9d movl $0, %r8d movq 24(%rsp), %rdx movl $4, %ecx movq 36(%rsp), %rdi movl $32, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L129 .L102: movdqa 80(%rsp), %xmm1 movaps %xmm1, 144(%rsp) movdqa 96(%rsp), %xmm2 movaps %xmm2, 160(%rsp) leaq 272(%rsp), %rbx movq %rbx, 208(%rsp) movq $512, 216(%rsp) movq $128, 224(%rsp) movq $64, 232(%rsp) movl $2, 264(%rsp) movq $32, 256(%rsp) movq $64, 248(%rsp) movq $512, 240(%rsp) leaq 112(%rsp), %rdi call cudaMemcpy3D@PLT movq 80(%rsp), %rdi call cudaFree@PLT movq %rbx, %rdx movl $0, %r12d movl $0, %ebp jmp .L103 .L129: movdqa 80(%rsp), %xmm3 movaps %xmm3, 112(%rsp) movdqa 96(%rsp), %xmm4 movaps %xmm4, 128(%rsp) leaq 112(%rsp), %rdi movl $128, %ecx movl $64, %edx movl $32, %esi call _Z53__device_stub__Z16tensor_3d_assign14cudaPitchedPtriiiR14cudaPitchedPtriii jmp .L102 .L133: movl %ebx, 12(%rsp) movl $23, %edx leaq 
.LC4(%rip), %rsi leaq _ZSt4cout(%rip), %r14 movq %r14, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl %r12d, %esi movq %r14, %rdi call _ZNSolsEi@PLT movq %rax, %r14 movl $2, %edx leaq .LC5(%rip), %r15 movq %r15, %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl %r13d, %esi movq %r14, %rdi call _ZNSolsEi@PLT movq %rax, %r14 movl $2, %edx movq %r15, %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl 12(%rsp), %esi movq %r14, %rdi call _ZNSolsEi@PLT movq %rax, %r14 movl $4, %edx leaq .LC1(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movslq %ebx, %rbx movslq %r12d, %rax movslq %r13d, %r13 salq $6, %rax addq %r13, %rax salq $7, %rax addq %rbx, %rax movl 272(%rsp,%rax,4), %esi movq %r14, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movl $11, %edx leaq .LC6(%rip), %rsi movq %rax, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movl %ebp, %esi movq %rbx, %rdi call _ZNSolsEi@PLT movq %rax, %rbx movq (%rax), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbp testq %rbp, %rbp je .L130 cmpb $0, 56(%rbp) je .L107 movzbl 67(%rbp), %esi .L108: movsbl %sil, %esi movq %rbx, %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT movl $5, %edx leaq .LC2(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbx testq %rbx, %rbx je .L131 cmpb $0, 56(%rbx) je .L120 movzbl 67(%rbx), %esi .L121: movsbl %sil, %esi leaq _ZSt4cout(%rip), %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT jmp .L101 .L130: movq 1048856(%rsp), %rax subq %fs:40, %rax jne .L132 call _ZSt16__throw_bad_castv@PLT .L132: call __stack_chk_fail@PLT .L107: movq 
%rbp, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq 0(%rbp), %rax movl $10, %esi movq %rbp, %rdi call *48(%rax) movl %eax, %esi jmp .L108 .L112: addl $1, %r12d addq $32768, %rdx cmpl $32, %r12d je .L113 .L103: movq %rdx, %rax movl $0, %r13d .L114: movl $0, %ebx .L111: cmpl %ebp, (%rax,%rbx,4) jne .L133 addl $1, %ebp addq $1, %rbx cmpq $128, %rbx jne .L111 addl $1, %r13d addq $512, %rax cmpl $64, %r13d jne .L114 jmp .L112 .L136: movq 1048856(%rsp), %rax subq %fs:40, %rax jne .L134 call _ZSt16__throw_bad_castv@PLT .L134: call __stack_chk_fail@PLT .L116: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) movl %eax, %esi jmp .L117 .L131: movq 1048856(%rsp), %rax subq %fs:40, %rax jne .L135 call _ZSt16__throw_bad_castv@PLT .L135: call __stack_chk_fail@PLT .L120: movq %rbx, %rdi call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT movq (%rbx), %rax movl $10, %esi movq %rbx, %rdi call *48(%rax) movl %eax, %esi jmp .L121 .L113: movl $2, %edx leaq .LC3(%rip), %rsi leaq _ZSt4cout(%rip), %rbx movq %rbx, %rdi call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq 240(%rbx,%rax), %rbx testq %rbx, %rbx je .L136 cmpb $0, 56(%rbx) je .L116 movzbl 67(%rbx), %esi .L117: movsbl %sil, %esi leaq _ZSt4cout(%rip), %rdi call _ZNSo3putEc@PLT movq %rax, %rdi call _ZNSo5flushEv@PLT .L101: movq 1048856(%rsp), %rax subq %fs:40, %rax jne .L137 addq $1048872, %rsp .cfi_remember_state .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %rbp .cfi_def_cfa_offset 40 popq %r12 .cfi_def_cfa_offset 32 popq %r13 .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 ret .L137: .cfi_restore_state call __stack_chk_fail@PLT .cfi_endproc .LFE4323: .size _Z14tensor_3d_testv, .-_Z14tensor_3d_testv .globl main .type main, @function main: .LFB4324: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call 
_Z14tensor_3d_testv movl $0, %eax addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4324: .size main, .-main .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC7: .string "_Z16tensor_3d_assign14cudaPitchedPtriii" .section .rodata.str1.1 .LC8: .string "_Z16tensor_2d_assignPfiim" .LC9: .string "_Z16tensor_1d_assignPfm" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB4356: .cfi_startproc endbr64 pushq %rbx .cfi_def_cfa_offset 16 .cfi_offset 3, -16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rbx movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC7(%rip), %rdx movq %rdx, %rcx leaq _Z16tensor_3d_assign14cudaPitchedPtriii(%rip), %rsi movq %rax, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC8(%rip), %rdx movq %rdx, %rcx leaq _Z16tensor_2d_assignPfiim(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC9(%rip), %rdx movq %rdx, %rcx leaq _Z16tensor_1d_assignPfm(%rip), %rsi movq %rbx, %rdi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT popq %rbx .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE4356: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 
8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .hidden DW.ref.__gxx_personality_v0 .weak DW.ref.__gxx_personality_v0 .section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat .align 8 .type DW.ref.__gxx_personality_v0, @object .size DW.ref.__gxx_personality_v0, 8 DW.ref.__gxx_personality_v0: .quad __gxx_personality_v0 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <iostream> #include <memory> __global__ void tensor_1d_assign(float *tensor, size_t tensor_size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < tensor_size) { tensor[i] = i; } } void tensor_1d_test() { float *device_ptr; cudaMalloc(&device_ptr, sizeof(float) * 1024 * 1024); const int block_dim = 256; const int grid_dim = 1024 * 1024 / 256; tensor_1d_assign<<<grid_dim, block_dim>>>(device_ptr, 1024 * 1024); auto host_ptr = std::unique_ptr<float[]>(new float[1024 * 1024]); cudaMemcpy(host_ptr.get(), device_ptr, sizeof(float) * 1024 * 1024, cudaMemcpyDeviceToHost); cudaFree(device_ptr); bool is_ok = true; for (auto i = 0; i < 1024 * 1024; ++i) { if (host_ptr[i] != i) { is_ok = false; std::cout << "host_ptr[" << i << "] = " << host_ptr[i] << std::endl; break; } } if (is_ok) { std::cout << "ok" << std::endl; } else { std::cout << "wrong" << std::endl; } } __global__ void tensor_2d_assign(float *tensor, int width, int height, size_t pitch) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (!(i < height && j < width)) { return; } float *tensor_row = reinterpret_cast<float *>(reinterpret_cast<char *>(tensor) + i * pitch); tensor_row[j] = i * width + j; } void tensor_2d_test() { const int width = 4096; const int height = 1024; size_t pitch = 0u; float *device_ptr; cudaMallocPitch(&device_ptr, &pitch, width * sizeof(float), height); dim3 block_dim(16, 16); dim3 grid_dim(1024 / 16, 4096 / 16); tensor_2d_assign<<<grid_dim, block_dim>>>(device_ptr, width, height, pitch); auto host_ptr = std::unique_ptr<float[]>(new float[width * height]); cudaMemcpy2D(host_ptr.get(), width * sizeof(float), device_ptr, pitch, width * sizeof(float), height, cudaMemcpyDeviceToHost); cudaFree(device_ptr); bool is_ok = true; auto value = 0; for (auto i = 0; i < height; ++i) { for (auto j = 0; j < width; ++j, ++value) { if (host_ptr[width * i + j] != value) { std::cout << host_ptr[width * i + j] << std::endl; break; } } if (!is_ok) { 
break; } } if (is_ok) { std::cout << "ok" << std::endl; } else { std::cout << "wrong" << std::endl; } } __global__ void tensor_3d_assign(cudaPitchedPtr pitched_ptr, int depth, int height, int width) { const size_t pitch = pitched_ptr.pitch; const size_t slice_pitch = pitch * height; int d_idx = blockDim.x * blockIdx.x + threadIdx.x; int h_idx = blockDim.y * blockIdx.y + threadIdx.y; int w_idx = blockDim.z * blockIdx.z + threadIdx.z; char *slice_ptr = reinterpret_cast<char *>(pitched_ptr.ptr) + d_idx * slice_pitch; int *row = reinterpret_cast<int *>(slice_ptr + h_idx * pitch); row[w_idx] = d_idx * height * width + h_idx * width + w_idx; } void tensor_3d_test() { const int depth = 32; const int height = 64; const int width = 128; cudaExtent extent = make_cudaExtent(width * sizeof(int), height, depth); cudaPitchedPtr pitched_ptr; cudaMalloc3D(&pitched_ptr, extent); dim3 block_dim(4, 4, 4); dim3 grid_dim(depth / 4, height / 4, width / 4); tensor_3d_assign<<<grid_dim, block_dim>>>(pitched_ptr, depth, height, width); int host_ptr[32][64][128]; cudaMemcpy3DParms memcpy_params; memcpy_params.srcPtr = pitched_ptr; memcpy_params.dstPtr.ptr = host_ptr; memcpy_params.dstPtr.pitch = width * sizeof(int); memcpy_params.dstPtr.xsize = width; memcpy_params.dstPtr.ysize = height; memcpy_params.kind = cudaMemcpyDeviceToHost; memcpy_params.extent.depth = depth; memcpy_params.extent.height = height; memcpy_params.extent.width = width * sizeof(int); cudaMemcpy3D(&memcpy_params); cudaFree(pitched_ptr.ptr); bool is_ok = true; int value = 0; for (int d = 0; d < depth; ++d) { for (int h = 0; h < height; ++h) { for (int w = 0; w < width; ++w, ++value) { if (host_ptr[d][h][w] != value) { std::cout << "wrong result. 
host_ptr[" << d << "][" << h << "][" << w << "] = " << host_ptr[d][h][w] << ",, value = " << value << std::endl; is_ok = false; break; } } if (!is_ok) break; } if (!is_ok) break; } if (is_ok) { std::cout << "ok" << std::endl; } else { std::cout << "wrong" << std::endl; } } int main() { // tensor_1d_test(); // tensor_2d_test(); tensor_3d_test(); return 0; }
#include <hip/hip_runtime.h> #include <iostream> #include <memory> __global__ void tensor_1d_assign(float *tensor, size_t tensor_size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < tensor_size) { tensor[i] = i; } } void tensor_1d_test() { float *device_ptr; hipMalloc(&device_ptr, sizeof(float) * 1024 * 1024); const int block_dim = 256; const int grid_dim = 1024 * 1024 / 256; tensor_1d_assign<<<grid_dim, block_dim>>>(device_ptr, 1024 * 1024); auto host_ptr = std::unique_ptr<float[]>(new float[1024 * 1024]); hipMemcpy(host_ptr.get(), device_ptr, sizeof(float) * 1024 * 1024, hipMemcpyDeviceToHost); hipFree(device_ptr); bool is_ok = true; for (auto i = 0; i < 1024 * 1024; ++i) { if (host_ptr[i] != i) { is_ok = false; std::cout << "host_ptr[" << i << "] = " << host_ptr[i] << std::endl; break; } } if (is_ok) { std::cout << "ok" << std::endl; } else { std::cout << "wrong" << std::endl; } } __global__ void tensor_2d_assign(float *tensor, int width, int height, size_t pitch) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (!(i < height && j < width)) { return; } float *tensor_row = reinterpret_cast<float *>(reinterpret_cast<char *>(tensor) + i * pitch); tensor_row[j] = i * width + j; } void tensor_2d_test() { const int width = 4096; const int height = 1024; size_t pitch = 0u; float *device_ptr; hipMallocPitch(&device_ptr, &pitch, width * sizeof(float), height); dim3 block_dim(16, 16); dim3 grid_dim(1024 / 16, 4096 / 16); tensor_2d_assign<<<grid_dim, block_dim>>>(device_ptr, width, height, pitch); auto host_ptr = std::unique_ptr<float[]>(new float[width * height]); hipMemcpy2D(host_ptr.get(), width * sizeof(float), device_ptr, pitch, width * sizeof(float), height, hipMemcpyDeviceToHost); hipFree(device_ptr); bool is_ok = true; auto value = 0; for (auto i = 0; i < height; ++i) { for (auto j = 0; j < width; ++j, ++value) { if (host_ptr[width * i + j] != value) { std::cout << host_ptr[width * i + j] << std::endl; 
break; } } if (!is_ok) { break; } } if (is_ok) { std::cout << "ok" << std::endl; } else { std::cout << "wrong" << std::endl; } } __global__ void tensor_3d_assign(hipPitchedPtr pitched_ptr, int depth, int height, int width) { const size_t pitch = pitched_ptr.pitch; const size_t slice_pitch = pitch * height; int d_idx = blockDim.x * blockIdx.x + threadIdx.x; int h_idx = blockDim.y * blockIdx.y + threadIdx.y; int w_idx = blockDim.z * blockIdx.z + threadIdx.z; char *slice_ptr = reinterpret_cast<char *>(pitched_ptr.ptr) + d_idx * slice_pitch; int *row = reinterpret_cast<int *>(slice_ptr + h_idx * pitch); row[w_idx] = d_idx * height * width + h_idx * width + w_idx; } void tensor_3d_test() { const int depth = 32; const int height = 64; const int width = 128; hipExtent extent = make_hipExtent(width * sizeof(int), height, depth); hipPitchedPtr pitched_ptr; hipMalloc3D(&pitched_ptr, extent); dim3 block_dim(4, 4, 4); dim3 grid_dim(depth / 4, height / 4, width / 4); tensor_3d_assign<<<grid_dim, block_dim>>>(pitched_ptr, depth, height, width); int host_ptr[32][64][128]; hipMemcpy3DParms memcpy_params; memcpy_params.srcPtr = pitched_ptr; memcpy_params.dstPtr.ptr = host_ptr; memcpy_params.dstPtr.pitch = width * sizeof(int); memcpy_params.dstPtr.xsize = width; memcpy_params.dstPtr.ysize = height; memcpy_params.kind = hipMemcpyDeviceToHost; memcpy_params.extent.depth = depth; memcpy_params.extent.height = height; memcpy_params.extent.width = width * sizeof(int); hipMemcpy3D(&memcpy_params); hipFree(pitched_ptr.ptr); bool is_ok = true; int value = 0; for (int d = 0; d < depth; ++d) { for (int h = 0; h < height; ++h) { for (int w = 0; w < width; ++w, ++value) { if (host_ptr[d][h][w] != value) { std::cout << "wrong result. 
host_ptr[" << d << "][" << h << "][" << w << "] = " << host_ptr[d][h][w] << ",, value = " << value << std::endl; is_ok = false; break; } } if (!is_ok) break; } if (!is_ok) break; } if (is_ok) { std::cout << "ok" << std::endl; } else { std::cout << "wrong" << std::endl; } } int main() { // tensor_1d_test(); // tensor_2d_test(); tensor_3d_test(); return 0; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include <iostream> #include <memory> __global__ void tensor_1d_assign(float *tensor, size_t tensor_size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < tensor_size) { tensor[i] = i; } } void tensor_1d_test() { float *device_ptr; hipMalloc(&device_ptr, sizeof(float) * 1024 * 1024); const int block_dim = 256; const int grid_dim = 1024 * 1024 / 256; tensor_1d_assign<<<grid_dim, block_dim>>>(device_ptr, 1024 * 1024); auto host_ptr = std::unique_ptr<float[]>(new float[1024 * 1024]); hipMemcpy(host_ptr.get(), device_ptr, sizeof(float) * 1024 * 1024, hipMemcpyDeviceToHost); hipFree(device_ptr); bool is_ok = true; for (auto i = 0; i < 1024 * 1024; ++i) { if (host_ptr[i] != i) { is_ok = false; std::cout << "host_ptr[" << i << "] = " << host_ptr[i] << std::endl; break; } } if (is_ok) { std::cout << "ok" << std::endl; } else { std::cout << "wrong" << std::endl; } } __global__ void tensor_2d_assign(float *tensor, int width, int height, size_t pitch) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (!(i < height && j < width)) { return; } float *tensor_row = reinterpret_cast<float *>(reinterpret_cast<char *>(tensor) + i * pitch); tensor_row[j] = i * width + j; } void tensor_2d_test() { const int width = 4096; const int height = 1024; size_t pitch = 0u; float *device_ptr; hipMallocPitch(&device_ptr, &pitch, width * sizeof(float), height); dim3 block_dim(16, 16); dim3 grid_dim(1024 / 16, 4096 / 16); tensor_2d_assign<<<grid_dim, block_dim>>>(device_ptr, width, height, pitch); auto host_ptr = std::unique_ptr<float[]>(new float[width * height]); hipMemcpy2D(host_ptr.get(), width * sizeof(float), device_ptr, pitch, width * sizeof(float), height, hipMemcpyDeviceToHost); hipFree(device_ptr); bool is_ok = true; auto value = 0; for (auto i = 0; i < height; ++i) { for (auto j = 0; j < width; ++j, ++value) { if (host_ptr[width * i + j] != value) { std::cout << host_ptr[width * i + j] << std::endl; 
break; } } if (!is_ok) { break; } } if (is_ok) { std::cout << "ok" << std::endl; } else { std::cout << "wrong" << std::endl; } } __global__ void tensor_3d_assign(hipPitchedPtr pitched_ptr, int depth, int height, int width) { const size_t pitch = pitched_ptr.pitch; const size_t slice_pitch = pitch * height; int d_idx = blockDim.x * blockIdx.x + threadIdx.x; int h_idx = blockDim.y * blockIdx.y + threadIdx.y; int w_idx = blockDim.z * blockIdx.z + threadIdx.z; char *slice_ptr = reinterpret_cast<char *>(pitched_ptr.ptr) + d_idx * slice_pitch; int *row = reinterpret_cast<int *>(slice_ptr + h_idx * pitch); row[w_idx] = d_idx * height * width + h_idx * width + w_idx; } void tensor_3d_test() { const int depth = 32; const int height = 64; const int width = 128; hipExtent extent = make_hipExtent(width * sizeof(int), height, depth); hipPitchedPtr pitched_ptr; hipMalloc3D(&pitched_ptr, extent); dim3 block_dim(4, 4, 4); dim3 grid_dim(depth / 4, height / 4, width / 4); tensor_3d_assign<<<grid_dim, block_dim>>>(pitched_ptr, depth, height, width); int host_ptr[32][64][128]; hipMemcpy3DParms memcpy_params; memcpy_params.srcPtr = pitched_ptr; memcpy_params.dstPtr.ptr = host_ptr; memcpy_params.dstPtr.pitch = width * sizeof(int); memcpy_params.dstPtr.xsize = width; memcpy_params.dstPtr.ysize = height; memcpy_params.kind = hipMemcpyDeviceToHost; memcpy_params.extent.depth = depth; memcpy_params.extent.height = height; memcpy_params.extent.width = width * sizeof(int); hipMemcpy3D(&memcpy_params); hipFree(pitched_ptr.ptr); bool is_ok = true; int value = 0; for (int d = 0; d < depth; ++d) { for (int h = 0; h < height; ++h) { for (int w = 0; w < width; ++w, ++value) { if (host_ptr[d][h][w] != value) { std::cout << "wrong result. 
host_ptr[" << d << "][" << h << "][" << w << "] = " << host_ptr[d][h][w] << ",, value = " << value << std::endl; is_ok = false; break; } } if (!is_ok) break; } if (!is_ok) break; } if (is_ok) { std::cout << "ok" << std::endl; } else { std::cout << "wrong" << std::endl; } } int main() { // tensor_1d_test(); // tensor_2d_test(); tensor_3d_test(); return 0; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z16tensor_1d_assignPfm .globl _Z16tensor_1d_assignPfm .p2align 8 .type _Z16tensor_1d_assignPfm,@function _Z16tensor_1d_assignPfm: s_clause 0x1 s_load_b32 s4, s[0:1], 0x1c s_load_b64 s[2:3], s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s4, s4, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1] v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) v_cmp_gt_u64_e32 vcc_lo, s[2:3], v[1:2] s_and_saveexec_b32 s2, vcc_lo s_cbranch_execz .LBB0_2 s_load_b64 s[0:1], s[0:1], 0x0 v_lshlrev_b64 v[2:3], 2, v[1:2] v_cvt_f32_i32_e32 v4, v1 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v0, vcc_lo, s0, v2 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v3, vcc_lo global_store_b32 v[0:1], v4, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z16tensor_1d_assignPfm .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 
.amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z16tensor_1d_assignPfm, .Lfunc_end0-_Z16tensor_1d_assignPfm .section .AMDGPU.csdata,"",@progbits .text .protected _Z16tensor_2d_assignPfiim .globl _Z16tensor_2d_assignPfiim .p2align 8 .type _Z16tensor_2d_assignPfiim,@function _Z16tensor_2d_assignPfiim: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b64 s[4:5], s[0:1], 0x8 v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v4, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 v_mad_u64_u32 v[2:3], null, s14, s3, v[1:2] v_mad_u64_u32 v[0:1], null, s15, s2, v[4:5] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_gt_i32_e32 vcc_lo, s5, v2 v_cmp_gt_i32_e64 s2, s4, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s2, vcc_lo, s2 s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB1_2 s_clause 0x1 s_load_b64 s[2:3], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 v_ashrrev_i32_e32 v1, 31, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[3:4], null, v2, s4, v[0:1] v_cvt_f32_i32_e32 v3, v3 s_waitcnt lgkmcnt(0) v_mad_u64_u32 v[4:5], null, v2, s0, s[2:3] v_mul_lo_u32 v2, v2, s1 v_mul_lo_u32 v6, v1, s0 v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_lshlrev_b64 v[0:1], 2, v[0:1] v_add3_u32 v2, v6, v5, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, v4, v0 v_add_co_ci_u32_e32 v1, vcc_lo, v2, v1, vcc_lo global_store_b32 v[0:1], v3, off .LBB1_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z16tensor_2d_assignPfiim .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 
.amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z16tensor_2d_assignPfiim, .Lfunc_end1-_Z16tensor_2d_assignPfiim .section .AMDGPU.csdata,"",@progbits .text .protected _Z16tensor_3d_assign13hipPitchedPtriii .globl _Z16tensor_3d_assign13hipPitchedPtriii .p2align 8 .type _Z16tensor_3d_assign13hipPitchedPtriii,@function _Z16tensor_3d_assign13hipPitchedPtriii: s_clause 0x2 s_load_b64 s[2:3], s[0:1], 0x24 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x3c v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v2, v0, 10, 10 v_bfe_u32 v0, v0, 20, 10 s_waitcnt lgkmcnt(0) s_ashr_i32 s8, s2, 31 s_mul_i32 s11, s6, s2 s_and_b32 s10, s0, 0xffff s_lshr_b32 s0, s0, 16 v_mad_u64_u32 v[3:4], null, s13, s10, v[1:2] s_mul_hi_u32 s9, s6, s2 s_mul_i32 s8, s6, s8 s_and_b32 s1, s1, 0xffff s_mul_i32 s10, s7, s2 s_add_i32 s8, s9, s8 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) 
s_add_i32 s8, s8, s10 v_mad_u64_u32 v[4:5], null, s14, s0, v[2:3] v_mad_u64_u32 v[1:2], null, s11, v3, s[4:5] v_ashrrev_i32_e32 v6, 31, v3 v_mul_lo_u32 v8, s8, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_mul_lo_u32 v9, s11, v6 v_mad_u64_u32 v[5:6], null, s15, s1, v[0:1] v_ashrrev_i32_e32 v0, 31, v4 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_add3_u32 v2, v8, v2, v9 v_mad_u64_u32 v[6:7], null, v3, s2, v[4:5] v_mul_lo_u32 v3, s7, v4 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) v_mul_lo_u32 v10, s6, v0 v_mad_u64_u32 v[7:8], null, v6, s3, v[5:6] v_mad_u64_u32 v[8:9], null, s6, v4, v[1:2] v_ashrrev_i32_e32 v6, 31, v5 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_lshlrev_b64 v[0:1], 2, v[5:6] v_add3_u32 v2, v3, v9, v10 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, v8, v0 v_add_co_ci_u32_e32 v1, vcc_lo, v2, v1, vcc_lo global_store_b32 v[0:1], v7, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z16tensor_3d_assign13hipPitchedPtriii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 304 .amdhsa_user_sgpr_count 13 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 1 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 2 .amdhsa_next_free_vgpr 11 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 
.amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end2: .size _Z16tensor_3d_assign13hipPitchedPtriii, .Lfunc_end2-_Z16tensor_3d_assign13hipPitchedPtriii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 8 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: 
_Z16tensor_1d_assignPfm .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z16tensor_1d_assignPfm.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 12 .size: 4 .value_kind: by_value - .offset: 16 .size: 8 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z16tensor_2d_assignPfiim .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z16tensor_2d_assignPfiim.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .offset: 0 .size: 32 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: by_value - .offset: 36 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: by_value - .offset: 48 .size: 4 .value_kind: hidden_block_count_x - .offset: 52 .size: 4 .value_kind: hidden_block_count_y - .offset: 56 .size: 4 .value_kind: 
hidden_block_count_z - .offset: 60 .size: 2 .value_kind: hidden_group_size_x - .offset: 62 .size: 2 .value_kind: hidden_group_size_y - .offset: 64 .size: 2 .value_kind: hidden_group_size_z - .offset: 66 .size: 2 .value_kind: hidden_remainder_x - .offset: 68 .size: 2 .value_kind: hidden_remainder_y - .offset: 70 .size: 2 .value_kind: hidden_remainder_z - .offset: 88 .size: 8 .value_kind: hidden_global_offset_x - .offset: 96 .size: 8 .value_kind: hidden_global_offset_y - .offset: 104 .size: 8 .value_kind: hidden_global_offset_z - .offset: 112 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 304 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z16tensor_3d_assign13hipPitchedPtriii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z16tensor_3d_assign13hipPitchedPtriii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 11 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include <iostream> #include <memory> __global__ void tensor_1d_assign(float *tensor, size_t tensor_size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < tensor_size) { tensor[i] = i; } } void tensor_1d_test() { float *device_ptr; hipMalloc(&device_ptr, sizeof(float) * 1024 * 1024); const int block_dim = 256; const int grid_dim = 1024 * 1024 / 256; tensor_1d_assign<<<grid_dim, block_dim>>>(device_ptr, 1024 * 1024); auto host_ptr = std::unique_ptr<float[]>(new float[1024 * 1024]); hipMemcpy(host_ptr.get(), device_ptr, sizeof(float) * 1024 * 1024, hipMemcpyDeviceToHost); hipFree(device_ptr); bool is_ok = true; for (auto i = 0; i < 1024 * 1024; ++i) { if (host_ptr[i] != i) { is_ok = false; std::cout << "host_ptr[" << i << "] = " << host_ptr[i] << std::endl; break; } } if (is_ok) { std::cout << "ok" << std::endl; } else { std::cout << "wrong" << std::endl; } } __global__ void tensor_2d_assign(float *tensor, int width, int height, size_t pitch) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (!(i < height && j < width)) { return; } float *tensor_row = reinterpret_cast<float *>(reinterpret_cast<char *>(tensor) + i * pitch); tensor_row[j] = i * width + j; } void tensor_2d_test() { const int width = 4096; const int height = 1024; size_t pitch = 0u; float *device_ptr; hipMallocPitch(&device_ptr, &pitch, width * sizeof(float), height); dim3 block_dim(16, 16); dim3 grid_dim(1024 / 16, 4096 / 16); tensor_2d_assign<<<grid_dim, block_dim>>>(device_ptr, width, height, pitch); auto host_ptr = std::unique_ptr<float[]>(new float[width * height]); hipMemcpy2D(host_ptr.get(), width * sizeof(float), device_ptr, pitch, width * sizeof(float), height, hipMemcpyDeviceToHost); hipFree(device_ptr); bool is_ok = true; auto value = 0; for (auto i = 0; i < height; ++i) { for (auto j = 0; j < width; ++j, ++value) { if (host_ptr[width * i + j] != value) { std::cout << host_ptr[width * i + j] << std::endl; 
break; } } if (!is_ok) { break; } } if (is_ok) { std::cout << "ok" << std::endl; } else { std::cout << "wrong" << std::endl; } } __global__ void tensor_3d_assign(hipPitchedPtr pitched_ptr, int depth, int height, int width) { const size_t pitch = pitched_ptr.pitch; const size_t slice_pitch = pitch * height; int d_idx = blockDim.x * blockIdx.x + threadIdx.x; int h_idx = blockDim.y * blockIdx.y + threadIdx.y; int w_idx = blockDim.z * blockIdx.z + threadIdx.z; char *slice_ptr = reinterpret_cast<char *>(pitched_ptr.ptr) + d_idx * slice_pitch; int *row = reinterpret_cast<int *>(slice_ptr + h_idx * pitch); row[w_idx] = d_idx * height * width + h_idx * width + w_idx; } void tensor_3d_test() { const int depth = 32; const int height = 64; const int width = 128; hipExtent extent = make_hipExtent(width * sizeof(int), height, depth); hipPitchedPtr pitched_ptr; hipMalloc3D(&pitched_ptr, extent); dim3 block_dim(4, 4, 4); dim3 grid_dim(depth / 4, height / 4, width / 4); tensor_3d_assign<<<grid_dim, block_dim>>>(pitched_ptr, depth, height, width); int host_ptr[32][64][128]; hipMemcpy3DParms memcpy_params; memcpy_params.srcPtr = pitched_ptr; memcpy_params.dstPtr.ptr = host_ptr; memcpy_params.dstPtr.pitch = width * sizeof(int); memcpy_params.dstPtr.xsize = width; memcpy_params.dstPtr.ysize = height; memcpy_params.kind = hipMemcpyDeviceToHost; memcpy_params.extent.depth = depth; memcpy_params.extent.height = height; memcpy_params.extent.width = width * sizeof(int); hipMemcpy3D(&memcpy_params); hipFree(pitched_ptr.ptr); bool is_ok = true; int value = 0; for (int d = 0; d < depth; ++d) { for (int h = 0; h < height; ++h) { for (int w = 0; w < width; ++w, ++value) { if (host_ptr[d][h][w] != value) { std::cout << "wrong result. 
host_ptr[" << d << "][" << h << "][" << w << "] = " << host_ptr[d][h][w] << ",, value = " << value << std::endl; is_ok = false; break; } } if (!is_ok) break; } if (!is_ok) break; } if (is_ok) { std::cout << "ok" << std::endl; } else { std::cout << "wrong" << std::endl; } } int main() { // tensor_1d_test(); // tensor_2d_test(); tensor_3d_test(); return 0; }
.text .file "chap3.hip" # Start of file scope inline assembly .globl _ZSt21ios_base_library_initv # End of file scope inline assembly .globl _Z31__device_stub__tensor_1d_assignPfm # -- Begin function _Z31__device_stub__tensor_1d_assignPfm .p2align 4, 0x90 .type _Z31__device_stub__tensor_1d_assignPfm,@function _Z31__device_stub__tensor_1d_assignPfm: # @_Z31__device_stub__tensor_1d_assignPfm .cfi_startproc # %bb.0: subq $88, %rsp .cfi_def_cfa_offset 96 movq %rdi, 56(%rsp) movq %rsi, 48(%rsp) leaq 56(%rsp), %rax movq %rax, 64(%rsp) leaq 48(%rsp), %rax movq %rax, 72(%rsp) leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z16tensor_1d_assignPfm, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $104, %rsp .cfi_adjust_cfa_offset -104 retq .Lfunc_end0: .size _Z31__device_stub__tensor_1d_assignPfm, .Lfunc_end0-_Z31__device_stub__tensor_1d_assignPfm .cfi_endproc # -- End function .globl _Z14tensor_1d_testv # -- Begin function _Z14tensor_1d_testv .p2align 4, 0x90 .type _Z14tensor_1d_testv,@function _Z14tensor_1d_testv: # @_Z14tensor_1d_testv .Lfunc_begin0: .cfi_startproc .cfi_personality 3, __gxx_personality_v0 .cfi_lsda 3, .Lexception0 # %bb.0: pushq %r15 .cfi_def_cfa_offset 16 pushq %r14 .cfi_def_cfa_offset 24 pushq %rbx .cfi_def_cfa_offset 32 subq $96, %rsp .cfi_def_cfa_offset 128 .cfi_offset %rbx, -32 .cfi_offset %r14, -24 .cfi_offset %r15, -16 .cfi_escape 0x2e, 0x00 leaq 8(%rsp), %rdi movl $4194304, %esi # imm = 0x400000 callq hipMalloc movabsq $4294967552, %rdx # imm = 0x100000100 leaq 3840(%rdx), %rdi .cfi_escape 0x2e, 0x00 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: movq 8(%rsp), %rax movq %rax, 72(%rsp) movq $1048576, 64(%rsp) # imm = 0x100000 leaq 72(%rsp), 
%rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) .cfi_escape 0x2e, 0x00 leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d .cfi_escape 0x2e, 0x10 leaq 80(%rsp), %r9 movl $_Z16tensor_1d_assignPfm, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: .cfi_escape 0x2e, 0x00 movl $4194304, %edi # imm = 0x400000 callq _Znam movq %rax, %rbx movq 8(%rsp), %rsi .Ltmp0: .cfi_escape 0x2e, 0x00 movl $4194304, %edx # imm = 0x400000 movq %rax, %rdi movl $2, %ecx callq hipMemcpy .Ltmp1: # %bb.3: movq 8(%rsp), %rdi .Ltmp2: .cfi_escape 0x2e, 0x00 callq hipFree .Ltmp3: # %bb.4: # %.preheader.preheader xorl %r14d, %r14d .p2align 4, 0x90 .LBB1_5: # %.preheader # =>This Inner Loop Header: Depth=1 xorps %xmm0, %xmm0 cvtsi2ss %r14d, %xmm0 movss (%rbx,%r14,4), %xmm1 # xmm1 = mem[0],zero,zero,zero ucomiss %xmm0, %xmm1 jne .LBB1_6 jp .LBB1_6 # %bb.23: # in Loop: Header=BB1_5 Depth=1 incq %r14 cmpq $1048576, %r14 # imm = 0x100000 jne .LBB1_5 # %bb.24: # %.critedge .Ltmp5: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str.2, %esi movl $2, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp6: # %bb.25: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit17 movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %r14 testq %r14, %r14 je .LBB1_21 # %bb.26: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i29 cmpb $0, 56(%r14) jne .LBB1_33 # %bb.27: .Ltmp7: .cfi_escape 0x2e, 0x00 movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp8: jmp .LBB1_32 .LBB1_6: .Ltmp9: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str, %esi movl $9, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp10: # %bb.7: # 
%_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit .Ltmp11: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl %r14d, %esi callq _ZNSolsEi .Ltmp12: # %bb.8: .Ltmp13: movq %rax, %r15 .cfi_escape 0x2e, 0x00 movl $.L.str.1, %esi movl $4, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp14: # %bb.9: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit16 movss (%rbx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero cvtss2sd %xmm0, %xmm0 .Ltmp15: .cfi_escape 0x2e, 0x00 movq %r15, %rdi callq _ZNSo9_M_insertIdEERSoT_ .Ltmp16: # %bb.10: # %_ZNSolsEf.exit movq %rax, %r14 movq (%rax), %rax movq -24(%rax), %rax movq 240(%r14,%rax), %r15 testq %r15, %r15 je .LBB1_11 # %bb.13: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i cmpb $0, 56(%r15) je .LBB1_15 # %bb.14: movzbl 67(%r15), %eax jmp .LBB1_17 .LBB1_15: .Ltmp17: .cfi_escape 0x2e, 0x00 movq %r15, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp18: # %bb.16: # %.noexc24 movq (%r15), %rax .Ltmp19: .cfi_escape 0x2e, 0x00 movq %r15, %rdi movl $10, %esi callq *48(%rax) .Ltmp20: .LBB1_17: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i .Ltmp21: .cfi_escape 0x2e, 0x00 movsbl %al, %esi movq %r14, %rdi callq _ZNSo3putEc .Ltmp22: # %bb.18: # %.noexc26 .Ltmp23: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp24: # %bb.19: # %_ZNSolsEPFRSoS_E.exit .Ltmp25: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str.3, %esi movl $5, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp26: # %bb.20: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit19 movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %r14 testq %r14, %r14 je .LBB1_21 # %bb.30: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i40 cmpb $0, 56(%r14) je .LBB1_31 .LBB1_33: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i42.invoke.sink.split movzbl 67(%r14), %eax jmp .LBB1_34 .LBB1_31: 
.Ltmp27: .cfi_escape 0x2e, 0x00 movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp28: .LBB1_32: # %.noexc45.invoke movq (%r14), %rax .Ltmp29: .cfi_escape 0x2e, 0x00 movq %r14, %rdi movl $10, %esi callq *48(%rax) .Ltmp30: .LBB1_34: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i42.invoke .Ltmp31: .cfi_escape 0x2e, 0x00 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc .Ltmp32: # %bb.35: # %.noexc47.invoke .Ltmp33: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp34: # %bb.36: # %_ZNKSt14default_deleteIA_fEclIfEENSt9enable_ifIXsr14is_convertibleIPA_T_PS0_EE5valueEvE4typeEPS4_.exit.i .cfi_escape 0x2e, 0x00 movq %rbx, %rdi callq _ZdaPv addq $96, %rsp .cfi_def_cfa_offset 32 popq %rbx .cfi_def_cfa_offset 24 popq %r14 .cfi_def_cfa_offset 16 popq %r15 .cfi_def_cfa_offset 8 retq .LBB1_21: # %.invoke .cfi_def_cfa_offset 128 .Ltmp35: .cfi_escape 0x2e, 0x00 callq _ZSt16__throw_bad_castv .Ltmp36: # %bb.29: # %.cont .LBB1_11: .Ltmp38: .cfi_escape 0x2e, 0x00 callq _ZSt16__throw_bad_castv .Ltmp39: # %bb.12: # %.noexc .LBB1_37: .Ltmp4: jmp .LBB1_38 .LBB1_22: .Ltmp40: jmp .LBB1_38 .LBB1_28: .Ltmp37: .LBB1_38: # %_ZNKSt14default_deleteIA_fEclIfEENSt9enable_ifIXsr14is_convertibleIPA_T_PS0_EE5valueEvE4typeEPS4_.exit.i22 movq %rax, %r14 .cfi_escape 0x2e, 0x00 movq %rbx, %rdi callq _ZdaPv .cfi_escape 0x2e, 0x00 movq %r14, %rdi callq _Unwind_Resume@PLT .Lfunc_end1: .size _Z14tensor_1d_testv, .Lfunc_end1-_Z14tensor_1d_testv .cfi_endproc .section .gcc_except_table,"a",@progbits .p2align 2, 0x0 GCC_except_table1: .Lexception0: .byte 255 # @LPStart Encoding = omit .byte 255 # @TType Encoding = omit .byte 1 # Call site Encoding = uleb128 .uleb128 .Lcst_end0-.Lcst_begin0 .Lcst_begin0: .uleb128 .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 << .uleb128 .Ltmp0-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Ltmp0 .byte 0 # has no landing pad .byte 0 # On action: cleanup .uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 2 << .uleb128 .Ltmp3-.Ltmp0 # Call between 
.Ltmp0 and .Ltmp3 .uleb128 .Ltmp4-.Lfunc_begin0 # jumps to .Ltmp4 .byte 0 # On action: cleanup .uleb128 .Ltmp5-.Lfunc_begin0 # >> Call Site 3 << .uleb128 .Ltmp8-.Ltmp5 # Call between .Ltmp5 and .Ltmp8 .uleb128 .Ltmp37-.Lfunc_begin0 # jumps to .Ltmp37 .byte 0 # On action: cleanup .uleb128 .Ltmp9-.Lfunc_begin0 # >> Call Site 4 << .uleb128 .Ltmp24-.Ltmp9 # Call between .Ltmp9 and .Ltmp24 .uleb128 .Ltmp40-.Lfunc_begin0 # jumps to .Ltmp40 .byte 0 # On action: cleanup .uleb128 .Ltmp25-.Lfunc_begin0 # >> Call Site 5 << .uleb128 .Ltmp36-.Ltmp25 # Call between .Ltmp25 and .Ltmp36 .uleb128 .Ltmp37-.Lfunc_begin0 # jumps to .Ltmp37 .byte 0 # On action: cleanup .uleb128 .Ltmp38-.Lfunc_begin0 # >> Call Site 6 << .uleb128 .Ltmp39-.Ltmp38 # Call between .Ltmp38 and .Ltmp39 .uleb128 .Ltmp40-.Lfunc_begin0 # jumps to .Ltmp40 .byte 0 # On action: cleanup .uleb128 .Ltmp39-.Lfunc_begin0 # >> Call Site 7 << .uleb128 .Lfunc_end1-.Ltmp39 # Call between .Ltmp39 and .Lfunc_end1 .byte 0 # has no landing pad .byte 0 # On action: cleanup .Lcst_end0: .p2align 2, 0x0 # -- End function .text .globl _Z31__device_stub__tensor_2d_assignPfiim # -- Begin function _Z31__device_stub__tensor_2d_assignPfiim .p2align 4, 0x90 .type _Z31__device_stub__tensor_2d_assignPfiim,@function _Z31__device_stub__tensor_2d_assignPfiim: # @_Z31__device_stub__tensor_2d_assignPfiim .cfi_startproc # %bb.0: subq $120, %rsp .cfi_def_cfa_offset 128 movq %rdi, 72(%rsp) movl %esi, 12(%rsp) movl %edx, 8(%rsp) movq %rcx, 64(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 12(%rsp), %rax movq %rax, 88(%rsp) leaq 8(%rsp), %rax movq %rax, 96(%rsp) leaq 64(%rsp), %rax movq %rax, 104(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z16tensor_2d_assignPfiim, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel 
addq $136, %rsp .cfi_adjust_cfa_offset -136 retq .Lfunc_end2: .size _Z31__device_stub__tensor_2d_assignPfiim, .Lfunc_end2-_Z31__device_stub__tensor_2d_assignPfiim .cfi_endproc # -- End function .globl _Z14tensor_2d_testv # -- Begin function _Z14tensor_2d_testv .p2align 4, 0x90 .type _Z14tensor_2d_testv,@function _Z14tensor_2d_testv: # @_Z14tensor_2d_testv .Lfunc_begin1: .cfi_startproc .cfi_personality 3, __gxx_personality_v0 .cfi_lsda 3, .Lexception1 # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx .cfi_def_cfa_offset 56 subq $136, %rsp .cfi_def_cfa_offset 192 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq $0, 8(%rsp) .cfi_escape 0x2e, 0x00 movq %rsp, %rdi leaq 8(%rsp), %rsi movl $16384, %edx # imm = 0x4000 movl $1024, %ecx # imm = 0x400 callq hipMallocPitch .cfi_escape 0x2e, 0x00 movabsq $1099511627840, %rdi # imm = 0x10000000040 movabsq $68719476752, %rdx # imm = 0x1000000010 movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB3_2 # %bb.1: movq (%rsp), %rax movq 8(%rsp), %rcx movq %rax, 88(%rsp) movl $4096, 20(%rsp) # imm = 0x1000 movl $1024, 16(%rsp) # imm = 0x400 movq %rcx, 80(%rsp) leaq 88(%rsp), %rax movq %rax, 96(%rsp) leaq 20(%rsp), %rax movq %rax, 104(%rsp) leaq 16(%rsp), %rax movq %rax, 112(%rsp) leaq 80(%rsp), %rax movq %rax, 120(%rsp) .cfi_escape 0x2e, 0x00 leaq 64(%rsp), %rdi leaq 48(%rsp), %rsi leaq 40(%rsp), %rdx leaq 32(%rsp), %rcx callq __hipPopCallConfiguration movq 64(%rsp), %rsi movl 72(%rsp), %edx movq 48(%rsp), %rcx movl 56(%rsp), %r8d .cfi_escape 0x2e, 0x10 leaq 96(%rsp), %r9 movl $_Z16tensor_2d_assignPfiim, %edi pushq 32(%rsp) .cfi_adjust_cfa_offset 8 pushq 48(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset 
-16 .LBB3_2: .cfi_escape 0x2e, 0x00 movl $16777216, %edi # imm = 0x1000000 callq _Znam movq %rax, %rbx movq (%rsp), %rdx movq 8(%rsp), %rcx .Ltmp41: .cfi_escape 0x2e, 0x10 subq $8, %rsp .cfi_adjust_cfa_offset 8 movl $16384, %esi # imm = 0x4000 movl $16384, %r8d # imm = 0x4000 movl $1024, %r9d # imm = 0x400 movq %rax, %rdi pushq $2 .cfi_adjust_cfa_offset 8 movq %rax, 40(%rsp) # 8-byte Spill callq hipMemcpy2D addq $16, %rsp .cfi_adjust_cfa_offset -16 .Ltmp42: # %bb.3: movq (%rsp), %rdi .Ltmp43: .cfi_escape 0x2e, 0x00 callq hipFree .Ltmp44: # %bb.4: # %.preheader.preheader xorl %r12d, %r12d movq %rbx, %r13 xorl %eax, %eax jmp .LBB3_5 .p2align 4, 0x90 .LBB3_21: # %_ZNSolsEPFRSoS_E.exit # in Loop: Header=BB3_5 Depth=1 incq %r12 addq $16384, %r13 # imm = 0x4000 cmpq $1024, %r12 # imm = 0x400 je .LBB3_22 .LBB3_5: # %.preheader # =>This Loop Header: Depth=1 # Child Loop BB3_6 Depth 2 movl %eax, %ebp addl $4096, %eax # imm = 0x1000 xorl %ebx, %ebx .p2align 4, 0x90 .LBB3_6: # Parent Loop BB3_5 Depth=1 # => This Inner Loop Header: Depth=2 leal (%rbx,%rbp), %ecx xorps %xmm1, %xmm1 cvtsi2ss %ecx, %xmm1 movss (%r13,%rbx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero ucomiss %xmm1, %xmm0 jne .LBB3_7 jp .LBB3_7 # %bb.20: # in Loop: Header=BB3_6 Depth=2 incq %rbx cmpq $4096, %rbx # imm = 0x1000 jne .LBB3_6 jmp .LBB3_21 .p2align 4, 0x90 .LBB3_7: # in Loop: Header=BB3_5 Depth=1 cvtss2sd %xmm0, %xmm0 .Ltmp46: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi callq _ZNSo9_M_insertIdEERSoT_ .Ltmp47: # %bb.8: # %_ZNSolsEf.exit # in Loop: Header=BB3_5 Depth=1 movq %rax, %r14 movq (%rax), %rax movq -24(%rax), %rax movq 240(%r14,%rax), %r15 testq %r15, %r15 je .LBB3_9 # %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i # in Loop: Header=BB3_5 Depth=1 cmpb $0, 56(%r15) je .LBB3_13 # %bb.12: # in Loop: Header=BB3_5 Depth=1 movzbl 67(%r15), %eax jmp .LBB3_15 .LBB3_13: # in Loop: Header=BB3_5 Depth=1 .Ltmp48: .cfi_escape 0x2e, 0x00 movq %r15, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp49: 
# %bb.14: # %.noexc31 # in Loop: Header=BB3_5 Depth=1 movq (%r15), %rax .Ltmp50: .cfi_escape 0x2e, 0x00 movq %r15, %rdi movl $10, %esi callq *48(%rax) .Ltmp51: .LBB3_15: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i # in Loop: Header=BB3_5 Depth=1 .Ltmp52: .cfi_escape 0x2e, 0x00 movsbl %al, %esi movq %r14, %rdi callq _ZNSo3putEc .Ltmp53: # %bb.16: # %.noexc33 # in Loop: Header=BB3_5 Depth=1 .Ltmp54: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp55: # %bb.17: # %.noexc33._ZNSolsEPFRSoS_E.exit_crit_edge # in Loop: Header=BB3_5 Depth=1 addq %rbx, %rbp movl %ebp, %eax jmp .LBB3_21 .LBB3_22: .Ltmp57: .cfi_escape 0x2e, 0x00 movl $_ZSt4cout, %edi movl $.L.str.2, %esi movl $2, %edx movq 24(%rsp), %rbx # 8-byte Reload callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l .Ltmp58: # %bb.23: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %r14 testq %r14, %r14 je .LBB3_24 # %bb.26: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i36 cmpb $0, 56(%r14) je .LBB3_28 # %bb.27: movzbl 67(%r14), %eax jmp .LBB3_30 .LBB3_28: .Ltmp59: .cfi_escape 0x2e, 0x00 movq %r14, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv .Ltmp60: # %bb.29: # %.noexc41 movq (%r14), %rax .Ltmp61: .cfi_escape 0x2e, 0x00 movq %r14, %rdi movl $10, %esi callq *48(%rax) .Ltmp62: .LBB3_30: # %_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc.exit.i38 .Ltmp63: .cfi_escape 0x2e, 0x00 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc .Ltmp64: # %bb.31: # %.noexc43 .Ltmp65: .cfi_escape 0x2e, 0x00 movq %rax, %rdi callq _ZNSo5flushEv .Ltmp66: # %bb.32: # %_ZNKSt14default_deleteIA_fEclIfEENSt9enable_ifIXsr14is_convertibleIPA_T_PS0_EE5valueEvE4typeEPS4_.exit.i .cfi_escape 0x2e, 0x00 movq %rbx, %rdi callq _ZdaPv addq $136, %rsp .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 
24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq .LBB3_9: .cfi_def_cfa_offset 192 .Ltmp70: .cfi_escape 0x2e, 0x00 callq _ZSt16__throw_bad_castv .Ltmp71: # %bb.10: # %.noexc .LBB3_24: .Ltmp67: .cfi_escape 0x2e, 0x00 callq _ZSt16__throw_bad_castv .Ltmp68: # %bb.25: # %.noexc40 .LBB3_33: .Ltmp45: jmp .LBB3_34 .LBB3_35: .Ltmp69: jmp .LBB3_34 .LBB3_19: # %.loopexit.split-lp .Ltmp72: jmp .LBB3_34 .LBB3_18: # %.loopexit .Ltmp56: .LBB3_34: # %_ZNKSt14default_deleteIA_fEclIfEENSt9enable_ifIXsr14is_convertibleIPA_T_PS0_EE5valueEvE4typeEPS4_.exit.i29 movq %rax, %r14 .cfi_escape 0x2e, 0x00 movq 24(%rsp), %rdi # 8-byte Reload callq _ZdaPv .cfi_escape 0x2e, 0x00 movq %r14, %rdi callq _Unwind_Resume@PLT .Lfunc_end3: .size _Z14tensor_2d_testv, .Lfunc_end3-_Z14tensor_2d_testv .cfi_endproc .section .gcc_except_table,"a",@progbits .p2align 2, 0x0 GCC_except_table3: .Lexception1: .byte 255 # @LPStart Encoding = omit .byte 255 # @TType Encoding = omit .byte 1 # Call site Encoding = uleb128 .uleb128 .Lcst_end1-.Lcst_begin1 .Lcst_begin1: .uleb128 .Lfunc_begin1-.Lfunc_begin1 # >> Call Site 1 << .uleb128 .Ltmp41-.Lfunc_begin1 # Call between .Lfunc_begin1 and .Ltmp41 .byte 0 # has no landing pad .byte 0 # On action: cleanup .uleb128 .Ltmp41-.Lfunc_begin1 # >> Call Site 2 << .uleb128 .Ltmp44-.Ltmp41 # Call between .Ltmp41 and .Ltmp44 .uleb128 .Ltmp45-.Lfunc_begin1 # jumps to .Ltmp45 .byte 0 # On action: cleanup .uleb128 .Ltmp46-.Lfunc_begin1 # >> Call Site 3 << .uleb128 .Ltmp55-.Ltmp46 # Call between .Ltmp46 and .Ltmp55 .uleb128 .Ltmp56-.Lfunc_begin1 # jumps to .Ltmp56 .byte 0 # On action: cleanup .uleb128 .Ltmp57-.Lfunc_begin1 # >> Call Site 4 << .uleb128 .Ltmp66-.Ltmp57 # Call between .Ltmp57 and .Ltmp66 .uleb128 .Ltmp69-.Lfunc_begin1 # jumps to .Ltmp69 .byte 0 # On action: cleanup .uleb128 .Ltmp70-.Lfunc_begin1 # >> Call Site 5 << .uleb128 .Ltmp71-.Ltmp70 # Call between .Ltmp70 and .Ltmp71 .uleb128 .Ltmp72-.Lfunc_begin1 # jumps to .Ltmp72 .byte 0 # On action: 
cleanup .uleb128 .Ltmp67-.Lfunc_begin1 # >> Call Site 6 << .uleb128 .Ltmp68-.Ltmp67 # Call between .Ltmp67 and .Ltmp68 .uleb128 .Ltmp69-.Lfunc_begin1 # jumps to .Ltmp69 .byte 0 # On action: cleanup .uleb128 .Ltmp68-.Lfunc_begin1 # >> Call Site 7 << .uleb128 .Lfunc_end3-.Ltmp68 # Call between .Ltmp68 and .Lfunc_end3 .byte 0 # has no landing pad .byte 0 # On action: cleanup .Lcst_end1: .p2align 2, 0x0 # -- End function .text .globl _Z31__device_stub__tensor_3d_assign13hipPitchedPtriii # -- Begin function _Z31__device_stub__tensor_3d_assign13hipPitchedPtriii .p2align 4, 0x90 .type _Z31__device_stub__tensor_3d_assign13hipPitchedPtriii,@function _Z31__device_stub__tensor_3d_assign13hipPitchedPtriii: # @_Z31__device_stub__tensor_3d_assign13hipPitchedPtriii .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movl %edi, 12(%rsp) movl %esi, 8(%rsp) movl %edx, 4(%rsp) leaq 112(%rsp), %rax movq %rax, 64(%rsp) leaq 12(%rsp), %rax movq %rax, 72(%rsp) leaq 8(%rsp), %rax movq %rax, 80(%rsp) leaq 4(%rsp), %rax movq %rax, 88(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 64(%rsp), %r9 movl $_Z16tensor_3d_assign13hipPitchedPtriii, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end4: .size _Z31__device_stub__tensor_3d_assign13hipPitchedPtriii, .Lfunc_end4-_Z31__device_stub__tensor_3d_assign13hipPitchedPtriii .cfi_endproc # -- End function .globl _Z14tensor_3d_testv # -- Begin function _Z14tensor_3d_testv .p2align 4, 0x90 .type _Z14tensor_3d_testv,@function _Z14tensor_3d_testv: # @_Z14tensor_3d_testv .cfi_startproc # %bb.0: pushq %rbp .cfi_def_cfa_offset 16 pushq %r15 .cfi_def_cfa_offset 24 pushq %r14 .cfi_def_cfa_offset 32 pushq %r13 .cfi_def_cfa_offset 40 pushq %r12 .cfi_def_cfa_offset 48 pushq %rbx 
.cfi_def_cfa_offset 56 subq $1048904, %rsp # imm = 0x100148 .cfi_def_cfa_offset 1048960 .cfi_offset %rbx, -56 .cfi_offset %r12, -48 .cfi_offset %r13, -40 .cfi_offset %r14, -32 .cfi_offset %r15, -24 .cfi_offset %rbp, -16 movq $512, 136(%rsp) # imm = 0x200 movq $64, 144(%rsp) movq $32, 152(%rsp) movups 136(%rsp), %xmm0 movups %xmm0, (%rsp) movq $32, 16(%rsp) leaq 104(%rsp), %rdi callq hipMalloc3D xorl %r13d, %r13d movabsq $68719476744, %rdi # imm = 0x1000000008 movabsq $17179869188, %rdx # imm = 0x400000004 movl $32, %esi movl $4, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB5_2 # %bb.1: movups 104(%rsp), %xmm0 movups 120(%rsp), %xmm1 movaps %xmm1, 176(%rsp) movaps %xmm0, 160(%rsp) movl $32, 44(%rsp) movl $64, 40(%rsp) movl $128, 36(%rsp) leaq 160(%rsp), %rax movq %rax, 320(%rsp) leaq 44(%rsp), %rax movq %rax, 328(%rsp) leaq 40(%rsp), %rax movq %rax, 336(%rsp) leaq 36(%rsp), %rax movq %rax, 344(%rsp) leaq 88(%rsp), %rdi leaq 72(%rsp), %rsi leaq 64(%rsp), %rdx leaq 56(%rsp), %rcx callq __hipPopCallConfiguration movq 64(%rsp), %rax movq 56(%rsp), %rdi movq 88(%rsp), %rsi movl 96(%rsp), %edx movq 72(%rsp), %rcx movl 80(%rsp), %r8d movq %rdi, 8(%rsp) movq %rax, (%rsp) leaq 320(%rsp), %r9 movl $_Z16tensor_3d_assign13hipPitchedPtriii, %edi callq hipLaunchKernel .LBB5_2: movups 104(%rsp), %xmm0 movups 120(%rsp), %xmm1 movups %xmm1, 208(%rsp) movups %xmm0, 192(%rsp) leaq 320(%rsp), %rbp movq %rbp, 256(%rsp) movq $512, 264(%rsp) # imm = 0x200 movq $128, 272(%rsp) movq $64, 280(%rsp) movl $2, 312(%rsp) movq $32, 304(%rsp) movq $64, 296(%rsp) movq $512, 288(%rsp) # imm = 0x200 leaq 160(%rsp), %rdi callq hipMemcpy3D movq 104(%rsp), %rdi callq hipFree movb $1, %al xorl %ebx, %ebx .p2align 4, 0x90 .LBB5_3: # %.preheader52 # =>This Loop Header: Depth=1 # Child Loop BB5_4 Depth 2 # Child Loop BB5_5 Depth 3 movq %rbp, 48(%rsp) # 8-byte Spill xorl %r14d, %r14d .p2align 4, 0x90 .LBB5_4: # %.preheader # Parent Loop BB5_3 Depth=1 # => 
This Loop Header: Depth=2 # Child Loop BB5_5 Depth 3 movl %r13d, %r15d subl $-128, %r13d xorl %r12d, %r12d .p2align 4, 0x90 .LBB5_5: # Parent Loop BB5_3 Depth=1 # Parent Loop BB5_4 Depth=2 # => This Inner Loop Header: Depth=3 leal (%r15,%r12), %ecx cmpl (%rbp,%r12,4), %ecx jne .LBB5_8 # %bb.6: # in Loop: Header=BB5_5 Depth=3 incq %r12 cmpq $128, %r12 jne .LBB5_5 # %bb.7: # %.loopexit # in Loop: Header=BB5_4 Depth=2 testb $1, %al jne .LBB5_13 jmp .LBB5_14 .p2align 4, 0x90 .LBB5_8: # in Loop: Header=BB5_4 Depth=2 movl $_ZSt4cout, %edi movl $.L.str.4, %esi movl $23, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl $_ZSt4cout, %edi movl %ebx, %esi callq _ZNSolsEi movq %rax, %r13 movl $.L.str.5, %esi movl $2, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq %r13, %rdi movl %r14d, %esi callq _ZNSolsEi movq %rax, %r13 movl $.L.str.5, %esi movl $2, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq %r13, %rdi movl %r12d, %esi callq _ZNSolsEi movq %rax, %r13 movl $.L.str.1, %esi movl $4, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movl (%rbp,%r12,4), %esi movq %r13, %rdi callq _ZNSolsEi movq %rax, %r13 movl $.L.str.6, %esi movl $11, %edx movq %rax, %rdi callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l addq %r12, %r15 movq %r13, %rdi movl %r15d, %esi callq _ZNSolsEi movq (%rax), %rcx movq -24(%rcx), %rcx movq 240(%rax,%rcx), %r12 testq %r12, %r12 je .LBB5_25 # %bb.9: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i # in Loop: Header=BB5_4 Depth=2 cmpb $0, 56(%r12) je .LBB5_11 # %bb.10: # in Loop: Header=BB5_4 Depth=2 movzbl 67(%r12), %ecx jmp .LBB5_12 .LBB5_11: # in Loop: Header=BB5_4 Depth=2 movq %r12, %rdi movq %rax, %r13 callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%r12), %rax movq %r12, %rdi movl $10, %esi 
callq *48(%rax) movl %eax, %ecx movq %r13, %rax .LBB5_12: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit # in Loop: Header=BB5_4 Depth=2 movsbl %cl, %esi movq %rax, %rdi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv xorl %eax, %eax movl %r15d, %r13d testb $1, %al je .LBB5_14 .LBB5_13: # %.loopexit # in Loop: Header=BB5_4 Depth=2 leaq 1(%r14), %rcx addq $512, %rbp # imm = 0x200 cmpq $63, %r14 movq %rcx, %r14 jb .LBB5_4 .LBB5_14: # in Loop: Header=BB5_3 Depth=1 testb $1, %al movq 48(%rsp), %rbp # 8-byte Reload je .LBB5_16 # %bb.15: # in Loop: Header=BB5_3 Depth=1 incq %rbx addq $32768, %rbp # imm = 0x8000 cmpq $32, %rbx jne .LBB5_3 .LBB5_16: movl $_ZSt4cout, %edi testb $1, %al jne .LBB5_20 # %bb.17: movl $.L.str.3, %esi movl $5, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB5_25 # %bb.18: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i44 cmpb $0, 56(%rbx) jne .LBB5_22 .LBB5_23: movq %rbx, %rdi callq _ZNKSt5ctypeIcE13_M_widen_initEv movq (%rbx), %rax movq %rbx, %rdi movl $10, %esi callq *48(%rax) jmp .LBB5_24 .LBB5_20: movl $.L.str.2, %esi movl $2, %edx callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l movq _ZSt4cout(%rip), %rax movq -24(%rax), %rax movq _ZSt4cout+240(%rax), %rbx testq %rbx, %rbx je .LBB5_25 # %bb.21: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i39 cmpb $0, 56(%rbx) je .LBB5_23 .LBB5_22: movzbl 67(%rbx), %eax .LBB5_24: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit42 movsbl %al, %esi movl $_ZSt4cout, %edi callq _ZNSo3putEc movq %rax, %rdi callq _ZNSo5flushEv addq $1048904, %rsp # imm = 0x100148 .cfi_def_cfa_offset 56 popq %rbx .cfi_def_cfa_offset 48 popq %r12 .cfi_def_cfa_offset 40 popq %r13 .cfi_def_cfa_offset 32 popq %r14 .cfi_def_cfa_offset 24 popq %r15 .cfi_def_cfa_offset 16 popq %rbp .cfi_def_cfa_offset 8 retq 
.LBB5_25: .cfi_def_cfa_offset 1048960 callq _ZSt16__throw_bad_castv .Lfunc_end5: .size _Z14tensor_3d_testv, .Lfunc_end5-_Z14tensor_3d_testv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: pushq %rax .cfi_def_cfa_offset 16 callq _Z14tensor_3d_testv xorl %eax, %eax popq %rcx .cfi_def_cfa_offset 8 retq .Lfunc_end6: .size main, .Lfunc_end6-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: pushq %rbx .cfi_def_cfa_offset 16 subq $32, %rsp .cfi_def_cfa_offset 48 .cfi_offset %rbx, -16 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB7_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB7_2: movq __hip_gpubin_handle(%rip), %rbx xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z16tensor_1d_assignPfm, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z16tensor_2d_assignPfiim, %esi movl $.L__unnamed_2, %edx movl $.L__unnamed_2, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z16tensor_3d_assign13hipPitchedPtriii, %esi movl $.L__unnamed_3, %edx movl $.L__unnamed_3, %ecx movq %rbx, %rdi movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $32, %rsp .cfi_def_cfa_offset 16 popq %rbx .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end7: .size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq 
__hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB8_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB8_2: retq .Lfunc_end8: .size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor .cfi_endproc # -- End function .type _Z16tensor_1d_assignPfm,@object # @_Z16tensor_1d_assignPfm .section .rodata,"a",@progbits .globl _Z16tensor_1d_assignPfm .p2align 3, 0x0 _Z16tensor_1d_assignPfm: .quad _Z31__device_stub__tensor_1d_assignPfm .size _Z16tensor_1d_assignPfm, 8 .type .L.str,@object # @.str .section .rodata.str1.1,"aMS",@progbits,1 .L.str: .asciz "host_ptr[" .size .L.str, 10 .type .L.str.1,@object # @.str.1 .L.str.1: .asciz "] = " .size .L.str.1, 5 .type .L.str.2,@object # @.str.2 .L.str.2: .asciz "ok" .size .L.str.2, 3 .type .L.str.3,@object # @.str.3 .L.str.3: .asciz "wrong" .size .L.str.3, 6 .type _Z16tensor_2d_assignPfiim,@object # @_Z16tensor_2d_assignPfiim .section .rodata,"a",@progbits .globl _Z16tensor_2d_assignPfiim .p2align 3, 0x0 _Z16tensor_2d_assignPfiim: .quad _Z31__device_stub__tensor_2d_assignPfiim .size _Z16tensor_2d_assignPfiim, 8 .type _Z16tensor_3d_assign13hipPitchedPtriii,@object # @_Z16tensor_3d_assign13hipPitchedPtriii .globl _Z16tensor_3d_assign13hipPitchedPtriii .p2align 3, 0x0 _Z16tensor_3d_assign13hipPitchedPtriii: .quad _Z31__device_stub__tensor_3d_assign13hipPitchedPtriii .size _Z16tensor_3d_assign13hipPitchedPtriii, 8 .type .L.str.4,@object # @.str.4 .section .rodata.str1.1,"aMS",@progbits,1 .L.str.4: .asciz "wrong result. 
host_ptr[" .size .L.str.4, 24 .type .L.str.5,@object # @.str.5 .L.str.5: .asciz "][" .size .L.str.5, 3 .type .L.str.6,@object # @.str.6 .L.str.6: .asciz ",, value = " .size .L.str.6, 12 .type .L__unnamed_1,@object # @0 .L__unnamed_1: .asciz "_Z16tensor_1d_assignPfm" .size .L__unnamed_1, 24 .type .L__unnamed_2,@object # @1 .L__unnamed_2: .asciz "_Z16tensor_2d_assignPfiim" .size .L__unnamed_2, 26 .type .L__unnamed_3,@object # @2 .L__unnamed_3: .asciz "_Z16tensor_3d_assign13hipPitchedPtriii" .size .L__unnamed_3, 39 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z31__device_stub__tensor_1d_assignPfm .addrsig_sym __gxx_personality_v0 .addrsig_sym _Z31__device_stub__tensor_2d_assignPfiim .addrsig_sym _Z31__device_stub__tensor_3d_assign13hipPitchedPtriii .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Unwind_Resume .addrsig_sym _Z16tensor_1d_assignPfm .addrsig_sym _ZSt4cout .addrsig_sym _Z16tensor_2d_assignPfiim .addrsig_sym _Z16tensor_3d_assign13hipPitchedPtriii .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z16tensor_3d_assign14cudaPitchedPtriii .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */ /* 0x000e220000002600 */ /*0020*/ MOV R7, c[0x0][0x168] ; /* 0x00005a0000077a02 */ /* 0x000fe20000000f00 */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*0040*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */ /* 0x000e280000002200 */ /*0050*/ S2R R9, SR_CTAID.X ; /* 0x0000000000097919 */ /* 0x000e680000002500 */ /*0060*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */ /* 0x000e680000002100 */ /*0070*/ S2R R8, SR_CTAID.Z ; /* 0x0000000000087919 */ /* 0x000ea80000002700 */ /*0080*/ S2R R11, SR_TID.Z ; /* 0x00000000000b7919 */ /* 0x000ea20000002300 */ /*0090*/ IMAD R2, R2, c[0x0][0x4], R3 ; /* 0x0000010002027a24 */ /* 0x001fca00078e0203 */ /*00a0*/ SHF.R.S32.HI R3, RZ, 0x1f, R2 ; /* 0x0000001fff037819 */ /* 0x000fe20000011402 */ /*00b0*/ IMAD R9, R9, c[0x0][0x0], R0 ; /* 0x0000000009097a24 */ /* 0x002fc800078e0200 */ /*00c0*/ IMAD.WIDE R4, R9, c[0x0][0x184], R2 ; /* 0x0000610009047a25 */ /* 0x000fc800078e0202 */ /*00d0*/ IMAD R3, R5, c[0x0][0x168], RZ ; /* 0x00005a0005037a24 */ /* 0x000fe400078e02ff */ /*00e0*/ IMAD.WIDE.U32 R6, R4, R7, c[0x0][0x160] ; /* 0x0000580004067625 */ /* 0x000fc800078e0007 */ /*00f0*/ IMAD R5, R4, c[0x0][0x16c], R3 ; /* 0x00005b0004057a24 */ /* 0x000fe400078e0203 */ /*0100*/ IMAD R3, R8, c[0x0][0x8], R11 ; /* 0x0000020008037a24 */ /* 0x004fe400078e020b */ /*0110*/ IMAD R2, R9, c[0x0][0x184], R2 ; /* 0x0000610009027a24 */ /* 0x000fe200078e0202 */ /*0120*/ IADD3 R7, R7, R5, RZ ; /* 0x0000000507077210 */ /* 0x000fc60007ffe0ff */ /*0130*/ IMAD R5, R2, c[0x0][0x188], R3 ; /* 0x0000620002057a24 */ /* 0x000fe400078e0203 */ /*0140*/ IMAD.WIDE R2, R3, 0x4, R6 ; /* 0x0000000403027825 */ /* 0x000fca00078e0206 */ 
/*0150*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x000fe2000c101904 */ /*0160*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0170*/ BRA 0x170; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... Function : _Z16tensor_2d_assignPfiim .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e280000002100 */ /*0030*/ S2R R5, SR_CTAID.Y ; /* 0x0000000000057919 */ /* 0x000e680000002600 */ /*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */ /* 0x000e620000002200 */ /*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x16c], PT ; /* 0x00005b0000007a0c */ /* 0x000fe20003f06270 */ /*0070*/ IMAD R5, R5, c[0x0][0x4], R2 ; /* 0x0000010005057a24 */ /* 0x002fca00078e0202 */ /*0080*/ ISETP.GE.OR P0, PT, R5, c[0x0][0x168], P0 ; /* 0x00005a0005007a0c */ /* 0x000fda0000706670 */ /*0090*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*00a0*/ SHF.R.S32.HI R2, RZ, 0x1f, R0 ; /* 0x0000001fff027819 */ /* 0x000fe20000011400 */ /*00b0*/ IMAD R4, R0, c[0x0][0x168], R5 ; /* 0x00005a0000047a24 */ /* 0x000fe200078e0205 */ /*00c0*/ MOV R3, c[0x0][0x170] ; /* 0x00005c0000037a02 */ /* 
0x000fe20000000f00 */ /*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fe40000000a00 */ /*00e0*/ IMAD R9, R2, c[0x0][0x170], RZ ; /* 0x00005c0002097a24 */ /* 0x000fe200078e02ff */ /*00f0*/ I2F R7, R4 ; /* 0x0000000400077306 */ /* 0x000e220000201400 */ /*0100*/ IMAD.WIDE.U32 R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x000fc800078e0003 */ /*0110*/ IMAD R9, R0, c[0x0][0x174], R9 ; /* 0x00005d0000097a24 */ /* 0x000fca00078e0209 */ /*0120*/ IADD3 R3, R3, R9, RZ ; /* 0x0000000903037210 */ /* 0x000fca0007ffe0ff */ /*0130*/ IMAD.WIDE R2, R5, 0x4, R2 ; /* 0x0000000405027825 */ /* 0x000fca00078e0202 */ /*0140*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x001fe2000c101904 */ /*0150*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0160*/ BRA 0x160; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ .......... 
Function : _Z16tensor_1d_assignPfm .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */ /* 0x000e280000002500 */ /*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */ /* 0x000e240000002100 */ /*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */ /* 0x001fca00078e0203 */ /*0040*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x168], PT ; /* 0x00005a0000007a0c */ /* 0x000fe40003f06070 */ /*0050*/ SHF.R.S32.HI R3, RZ, 0x1f, R0 ; /* 0x0000001fff037819 */ /* 0x000fc80000011400 */ /*0060*/ ISETP.GE.U32.AND.EX P0, PT, R3, c[0x0][0x16c], PT, P0 ; /* 0x00005b0003007a0c */ /* 0x000fda0003f06100 */ /*0070*/ @P0 EXIT ; /* 0x000000000000094d */ /* 0x000fea0003800000 */ /*0080*/ I2F R5, R0 ; /* 0x0000000000057306 */ /* 0x000e220000201400 */ /*0090*/ LEA R2, P0, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000027a11 */ /* 0x000fe200078010ff */ /*00a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fc60000000a00 */ /*00b0*/ LEA.HI.X R3, R0, c[0x0][0x164], R3, 0x2, P0 ; /* 0x0000590000037a11 */ /* 0x000fca00000f1403 */ /*00c0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x001fe2000c101904 */ /*00d0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ 
/* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z16tensor_1d_assignPfm .globl _Z16tensor_1d_assignPfm .p2align 8 .type _Z16tensor_1d_assignPfm,@function _Z16tensor_1d_assignPfm: s_clause 0x1 s_load_b32 s4, s[0:1], 0x1c s_load_b64 s[2:3], s[0:1], 0x8 s_waitcnt lgkmcnt(0) s_and_b32 s4, s4, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1] v_ashrrev_i32_e32 v2, 31, v1 s_delay_alu instid0(VALU_DEP_1) v_cmp_gt_u64_e32 vcc_lo, s[2:3], v[1:2] s_and_saveexec_b32 s2, vcc_lo s_cbranch_execz .LBB0_2 s_load_b64 s[0:1], s[0:1], 0x0 v_lshlrev_b64 v[2:3], 2, v[1:2] v_cvt_f32_i32_e32 v4, v1 s_waitcnt lgkmcnt(0) s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v0, vcc_lo, s0, v2 v_add_co_ci_u32_e32 v1, vcc_lo, s1, v3, vcc_lo global_store_b32 v[0:1], v4, off .LBB0_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z16tensor_1d_assignPfm .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 272 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 
.amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z16tensor_1d_assignPfm, .Lfunc_end0-_Z16tensor_1d_assignPfm .section .AMDGPU.csdata,"",@progbits .text .protected _Z16tensor_2d_assignPfiim .globl _Z16tensor_2d_assignPfiim .p2align 8 .type _Z16tensor_2d_assignPfiim,@function _Z16tensor_2d_assignPfiim: s_clause 0x1 s_load_b32 s2, s[0:1], 0x24 s_load_b64 s[4:5], s[0:1], 0x8 v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v4, v0, 10, 10 s_waitcnt lgkmcnt(0) s_and_b32 s3, s2, 0xffff s_lshr_b32 s2, s2, 16 v_mad_u64_u32 v[2:3], null, s14, s3, v[1:2] v_mad_u64_u32 v[0:1], null, s15, s2, v[4:5] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_cmp_gt_i32_e32 vcc_lo, s5, v2 v_cmp_gt_i32_e64 s2, s4, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_and_b32 s2, vcc_lo, s2 s_and_saveexec_b32 s3, s2 s_cbranch_execz .LBB1_2 s_clause 0x1 s_load_b64 s[2:3], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x10 v_ashrrev_i32_e32 v1, 31, v2 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_mad_u64_u32 v[3:4], null, v2, s4, v[0:1] v_cvt_f32_i32_e32 v3, v3 s_waitcnt lgkmcnt(0) v_mad_u64_u32 v[4:5], null, v2, s0, s[2:3] v_mul_lo_u32 v2, v2, s1 v_mul_lo_u32 v6, v1, s0 v_ashrrev_i32_e32 v1, 31, v0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_lshlrev_b64 v[0:1], 2, v[0:1] v_add3_u32 v2, v6, v5, v2 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, v4, v0 v_add_co_ci_u32_e32 v1, vcc_lo, v2, v1, vcc_lo global_store_b32 v[0:1], v3, off .LBB1_2: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z16tensor_2d_assignPfiim .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 
.amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 14 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 1 .amdhsa_next_free_vgpr 7 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end1: .size _Z16tensor_2d_assignPfiim, .Lfunc_end1-_Z16tensor_2d_assignPfiim .section .AMDGPU.csdata,"",@progbits .text .protected _Z16tensor_3d_assign13hipPitchedPtriii .globl _Z16tensor_3d_assign13hipPitchedPtriii .p2align 8 .type _Z16tensor_3d_assign13hipPitchedPtriii,@function _Z16tensor_3d_assign13hipPitchedPtriii: s_clause 0x2 s_load_b64 s[2:3], s[0:1], 0x24 s_load_b128 s[4:7], s[0:1], 0x0 s_load_b64 s[0:1], s[0:1], 0x3c v_and_b32_e32 v1, 0x3ff, v0 v_bfe_u32 v2, v0, 10, 10 v_bfe_u32 v0, v0, 20, 10 s_waitcnt lgkmcnt(0) s_ashr_i32 s8, s2, 31 s_mul_i32 s11, s6, s2 s_and_b32 s10, s0, 0xffff s_lshr_b32 s0, s0, 16 v_mad_u64_u32 v[3:4], null, s13, s10, v[1:2] s_mul_hi_u32 s9, s6, s2 s_mul_i32 s8, s6, s8 s_and_b32 s1, s1, 0xffff s_mul_i32 s10, s7, s2 s_add_i32 s8, s9, s8 s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1) 
s_add_i32 s8, s8, s10 v_mad_u64_u32 v[4:5], null, s14, s0, v[2:3] v_mad_u64_u32 v[1:2], null, s11, v3, s[4:5] v_ashrrev_i32_e32 v6, 31, v3 v_mul_lo_u32 v8, s8, v3 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4) v_mul_lo_u32 v9, s11, v6 v_mad_u64_u32 v[5:6], null, s15, s1, v[0:1] v_ashrrev_i32_e32 v0, 31, v4 s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3) v_add3_u32 v2, v8, v2, v9 v_mad_u64_u32 v[6:7], null, v3, s2, v[4:5] v_mul_lo_u32 v3, s7, v4 s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3) v_mul_lo_u32 v10, s6, v0 v_mad_u64_u32 v[7:8], null, v6, s3, v[5:6] v_mad_u64_u32 v[8:9], null, s6, v4, v[1:2] v_ashrrev_i32_e32 v6, 31, v5 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3) v_lshlrev_b64 v[0:1], 2, v[5:6] v_add3_u32 v2, v3, v9, v10 s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) v_add_co_u32 v0, vcc_lo, v8, v0 v_add_co_ci_u32_e32 v1, vcc_lo, v2, v1, vcc_lo global_store_b32 v[0:1], v7, off s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z16tensor_3d_assign13hipPitchedPtriii .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 304 .amdhsa_user_sgpr_count 13 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 1 .amdhsa_system_sgpr_workgroup_id_z 1 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 2 .amdhsa_next_free_vgpr 11 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 
.amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end2: .size _Z16tensor_3d_assign13hipPitchedPtriii, .Lfunc_end2-_Z16tensor_3d_assign13hipPitchedPtriii .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 8 .value_kind: by_value - .offset: 16 .size: 4 .value_kind: hidden_block_count_x - .offset: 20 .size: 4 .value_kind: hidden_block_count_y - .offset: 24 .size: 4 .value_kind: hidden_block_count_z - .offset: 28 .size: 2 .value_kind: hidden_group_size_x - .offset: 30 .size: 2 .value_kind: hidden_group_size_y - .offset: 32 .size: 2 .value_kind: hidden_group_size_z - .offset: 34 .size: 2 .value_kind: hidden_remainder_x - .offset: 36 .size: 2 .value_kind: hidden_remainder_y - .offset: 38 .size: 2 .value_kind: hidden_remainder_z - .offset: 56 .size: 8 .value_kind: hidden_global_offset_x - .offset: 64 .size: 8 .value_kind: hidden_global_offset_y - .offset: 72 .size: 8 .value_kind: hidden_global_offset_z - .offset: 80 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 272 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: 
_Z16tensor_1d_assignPfm .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z16tensor_1d_assignPfm.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .offset: 8 .size: 4 .value_kind: by_value - .offset: 12 .size: 4 .value_kind: by_value - .offset: 16 .size: 8 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - .offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z16tensor_2d_assignPfiim .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z16tensor_2d_assignPfiim.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 7 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 - .args: - .offset: 0 .size: 32 .value_kind: by_value - .offset: 32 .size: 4 .value_kind: by_value - .offset: 36 .size: 4 .value_kind: by_value - .offset: 40 .size: 4 .value_kind: by_value - .offset: 48 .size: 4 .value_kind: hidden_block_count_x - .offset: 52 .size: 4 .value_kind: hidden_block_count_y - .offset: 56 .size: 4 .value_kind: 
hidden_block_count_z - .offset: 60 .size: 2 .value_kind: hidden_group_size_x - .offset: 62 .size: 2 .value_kind: hidden_group_size_y - .offset: 64 .size: 2 .value_kind: hidden_group_size_z - .offset: 66 .size: 2 .value_kind: hidden_remainder_x - .offset: 68 .size: 2 .value_kind: hidden_remainder_y - .offset: 70 .size: 2 .value_kind: hidden_remainder_z - .offset: 88 .size: 8 .value_kind: hidden_global_offset_x - .offset: 96 .size: 8 .value_kind: hidden_global_offset_y - .offset: 104 .size: 8 .value_kind: hidden_global_offset_z - .offset: 112 .size: 2 .value_kind: hidden_grid_dims .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 304 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z16tensor_3d_assign13hipPitchedPtriii .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z16tensor_3d_assign13hipPitchedPtriii.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 11 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include "includes.h" __global__ void entrySearch_max_int_kernel(int *g_iarr, int *g_maxarr, int size) { // create shared memory extern __shared__ int sarr_int[]; // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x; if(i + blockDim.x < size) { if(g_iarr[i] > g_iarr[i + blockDim.x]) { sarr_int[tid] = g_iarr[i]; } else { sarr_int[tid] = g_iarr[i + blockDim.x]; } } else if (i < size) { sarr_int[tid] = g_iarr[i]; } else { sarr_int[tid] = INT_MIN; } __syncthreads(); // do comparison in shared mem for(unsigned int s=blockDim.x/2; s>0; s>>=1) { if(tid < s) { if(sarr_int[tid] < sarr_int[tid + s]) { sarr_int[tid] = sarr_int[tid + s]; } } __syncthreads(); } // write result for this block to global mem if(tid == 0) { g_maxarr[blockIdx.x] = sarr_int[0]; } }
code for sm_80 Function : _Z26entrySearch_max_int_kernelPiS_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e220000002500 */ /*0020*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */ /* 0x000fe20000000800 */ /*0030*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x000fe200078e00ff */ /*0040*/ USHF.L.U32 UR5, UR4, 0x1, URZ ; /* 0x0000000104057899 */ /* 0x000fe2000800063f */ /*0050*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */ /* 0x000e220000002100 */ /*0060*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fe20000000a00 */ /*0070*/ BSSY B0, 0x170 ; /* 0x000000f000007945 */ /* 0x000fe60003800000 */ /*0080*/ IMAD R0, R6, UR5, R7 ; /* 0x0000000506007c24 */ /* 0x001fc8000f8e0207 */ /*0090*/ IMAD.WIDE.U32 R2, R0.reuse, R5, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x040fe200078e0005 */ /*00a0*/ IADD3 R4, R0, c[0x0][0x0], RZ ; /* 0x0000000000047a10 */ /* 0x000fc80007ffe0ff */ /*00b0*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */ /* 0x000fda0003f06070 */ /*00c0*/ @!P0 BRA 0x120 ; /* 0x0000005000008947 */ /* 0x000fea0003800000 */ /*00d0*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */ /* 0x000fe20003f06070 */ /*00e0*/ IMAD.MOV.U32 R0, RZ, RZ, -0x80000000 ; /* 0x80000000ff007424 */ /* 0x000fd800078e00ff */ /*00f0*/ @P0 BRA 0x160 ; /* 0x0000006000000947 */ /* 0x000fea0003800000 */ /*0100*/ LDG.E R0, [R2.64] ; /* 0x0000000602007981 */ /* 0x000162000c1e1900 */ /*0110*/ BRA 0x160 ; /* 0x0000004000007947 */ /* 0x000fea0003800000 */ /*0120*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 0x000fe200078e0005 */ /*0130*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */ /* 0x000eaa000c1e1900 */ /*0140*/ LDG.E R4, [R4.64] ; /* 0x0000000604047981 */ /* 
0x000ea4000c1e1900 */ /*0150*/ IMNMX R0, R4, R3, !PT ; /* 0x0000000304007217 */ /* 0x004fe40007800200 */ /*0160*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0170*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */ /* 0x000fe20008011604 */ /*0180*/ STS [R7.X4], R0 ; /* 0x0000000007007388 */ /* 0x0203e80000004800 */ /*0190*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*01a0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fda000bf05270 */ /*01b0*/ @!P0 BRA 0x2b0 ; /* 0x000000f000008947 */ /* 0x000fea0003800000 */ /*01c0*/ SHF.L.U32 R0, R7, 0x2, RZ ; /* 0x0000000207007819 */ /* 0x002fe200000006ff */ /*01d0*/ IMAD.U32 R3, RZ, RZ, UR4 ; /* 0x00000004ff037e24 */ /* 0x001fca000f8e00ff */ /*01e0*/ ISETP.GE.U32.AND P0, PT, R7, R3, PT ; /* 0x000000030700720c */ /* 0x000fe20003f06070 */ /*01f0*/ BSSY B0, 0x270 ; /* 0x0000007000007945 */ /* 0x000fd80003800000 */ /*0200*/ @P0 BRA 0x260 ; /* 0x0000005000000947 */ /* 0x001fea0003800000 */ /*0210*/ IMAD R4, R3, 0x4, R0 ; /* 0x0000000403047824 */ /* 0x000fe200078e0200 */ /*0220*/ LDS R2, [R7.X4] ; /* 0x0000000007027984 */ /* 0x000fea0000004800 */ /*0230*/ LDS R4, [R4] ; /* 0x0000000004047984 */ /* 0x000e240000000800 */ /*0240*/ ISETP.GE.AND P0, PT, R2, R4, PT ; /* 0x000000040200720c */ /* 0x001fda0003f06270 */ /*0250*/ @!P0 STS [R7.X4], R4 ; /* 0x0000000407008388 */ /* 0x0001e40000004800 */ /*0260*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0270*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */ /* 0x000fe20000011603 */ /*0280*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe60000010000 */ /*0290*/ ISETP.NE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */ /* 0x000fda0003f05270 */ /*02a0*/ @P0 BRA 0x1e0 ; /* 0xffffff3000000947 */ /* 0x000fea000383ffff */ /*02b0*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */ /* 0x002fda0003f05270 */ /*02c0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 
0x000fea0003800000 */ /*02d0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */ /* 0x000e620000000800 */ /*02e0*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x001fd400000001ff */ /*02f0*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x168] ; /* 0x00005a0006027625 */ /* 0x000fca00078e0003 */ /*0300*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x002fe2000c101906 */ /*0310*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0320*/ BRA 0x320; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0380*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0390*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include "includes.h" __global__ void entrySearch_max_int_kernel(int *g_iarr, int *g_maxarr, int size) { // create shared memory extern __shared__ int sarr_int[]; // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x; if(i + blockDim.x < size) { if(g_iarr[i] > g_iarr[i + blockDim.x]) { sarr_int[tid] = g_iarr[i]; } else { sarr_int[tid] = g_iarr[i + blockDim.x]; } } else if (i < size) { sarr_int[tid] = g_iarr[i]; } else { sarr_int[tid] = INT_MIN; } __syncthreads(); // do comparison in shared mem for(unsigned int s=blockDim.x/2; s>0; s>>=1) { if(tid < s) { if(sarr_int[tid] < sarr_int[tid + s]) { sarr_int[tid] = sarr_int[tid + s]; } } __syncthreads(); } // write result for this block to global mem if(tid == 0) { g_maxarr[blockIdx.x] = sarr_int[0]; } }
.file "tmpxft_000d6029_00000000-6_entrySearch_max_int_kernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z49__device_stub__Z26entrySearch_max_int_kernelPiS_iPiS_i .type _Z49__device_stub__Z26entrySearch_max_int_kernelPiS_iPiS_i, @function _Z49__device_stub__Z26entrySearch_max_int_kernelPiS_iPiS_i: .LFB2051: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z26entrySearch_max_int_kernelPiS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z49__device_stub__Z26entrySearch_max_int_kernelPiS_iPiS_i, .-_Z49__device_stub__Z26entrySearch_max_int_kernelPiS_iPiS_i .globl _Z26entrySearch_max_int_kernelPiS_i .type _Z26entrySearch_max_int_kernelPiS_i, @function _Z26entrySearch_max_int_kernelPiS_i: .LFB2052: 
.cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z49__device_stub__Z26entrySearch_max_int_kernelPiS_iPiS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z26entrySearch_max_int_kernelPiS_i, .-_Z26entrySearch_max_int_kernelPiS_i .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z26entrySearch_max_int_kernelPiS_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z26entrySearch_max_int_kernelPiS_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include "includes.h" __global__ void entrySearch_max_int_kernel(int *g_iarr, int *g_maxarr, int size) { // create shared memory extern __shared__ int sarr_int[]; // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x; if(i + blockDim.x < size) { if(g_iarr[i] > g_iarr[i + blockDim.x]) { sarr_int[tid] = g_iarr[i]; } else { sarr_int[tid] = g_iarr[i + blockDim.x]; } } else if (i < size) { sarr_int[tid] = g_iarr[i]; } else { sarr_int[tid] = INT_MIN; } __syncthreads(); // do comparison in shared mem for(unsigned int s=blockDim.x/2; s>0; s>>=1) { if(tid < s) { if(sarr_int[tid] < sarr_int[tid + s]) { sarr_int[tid] = sarr_int[tid + s]; } } __syncthreads(); } // write result for this block to global mem if(tid == 0) { g_maxarr[blockIdx.x] = sarr_int[0]; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void entrySearch_max_int_kernel(int *g_iarr, int *g_maxarr, int size) { // create shared memory extern __shared__ int sarr_int[]; // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x; if(i + blockDim.x < size) { if(g_iarr[i] > g_iarr[i + blockDim.x]) { sarr_int[tid] = g_iarr[i]; } else { sarr_int[tid] = g_iarr[i + blockDim.x]; } } else if (i < size) { sarr_int[tid] = g_iarr[i]; } else { sarr_int[tid] = INT_MIN; } __syncthreads(); // do comparison in shared mem for(unsigned int s=blockDim.x/2; s>0; s>>=1) { if(tid < s) { if(sarr_int[tid] < sarr_int[tid + s]) { sarr_int[tid] = sarr_int[tid + s]; } } __syncthreads(); } // write result for this block to global mem if(tid == 0) { g_maxarr[blockIdx.x] = sarr_int[0]; } }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void entrySearch_max_int_kernel(int *g_iarr, int *g_maxarr, int size) { // create shared memory extern __shared__ int sarr_int[]; // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x; if(i + blockDim.x < size) { if(g_iarr[i] > g_iarr[i + blockDim.x]) { sarr_int[tid] = g_iarr[i]; } else { sarr_int[tid] = g_iarr[i + blockDim.x]; } } else if (i < size) { sarr_int[tid] = g_iarr[i]; } else { sarr_int[tid] = INT_MIN; } __syncthreads(); // do comparison in shared mem for(unsigned int s=blockDim.x/2; s>0; s>>=1) { if(tid < s) { if(sarr_int[tid] < sarr_int[tid + s]) { sarr_int[tid] = sarr_int[tid + s]; } } __syncthreads(); } // write result for this block to global mem if(tid == 0) { g_maxarr[blockIdx.x] = sarr_int[0]; } }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z26entrySearch_max_int_kernelPiS_i .globl _Z26entrySearch_max_int_kernelPiS_i .p2align 8 .type _Z26entrySearch_max_int_kernelPiS_i,@function _Z26entrySearch_max_int_kernelPiS_i: s_clause 0x2 s_load_b32 s3, s[0:1], 0x24 s_load_b32 s7, s[0:1], 0x10 s_load_b64 s[4:5], s[0:1], 0x0 s_mov_b32 s2, s15 s_waitcnt lgkmcnt(0) s_and_b32 s3, s3, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_mul_i32 s6, s15, s3 v_lshl_add_u32 v1, s6, 1, v0 s_mov_b32 s6, exec_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v3, s3, v1 v_cmpx_le_u32_e64 s7, v3 s_xor_b32 s6, exec_lo, s6 s_cbranch_execz .LBB0_6 v_cmp_le_u32_e32 vcc_lo, s7, v1 s_and_saveexec_b32 s7, vcc_lo s_delay_alu instid0(SALU_CYCLE_1) s_xor_b32 s7, exec_lo, s7 s_cbranch_execz .LBB0_3 v_lshl_add_u32 v1, v0, 2, 0 v_bfrev_b32_e32 v2, 1 ds_store_b32 v1, v2 .LBB0_3: s_and_not1_saveexec_b32 s7, s7 s_cbranch_execz .LBB0_5 v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[1:2], 2, v[1:2] v_add_co_u32 v1, vcc_lo, s4, v1 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo global_load_b32 v1, v[1:2], off v_lshl_add_u32 v2, v0, 2, 0 s_waitcnt vmcnt(0) ds_store_b32 v2, v1 .LBB0_5: s_or_b32 exec_lo, exec_lo, s7 .LBB0_6: s_and_not1_saveexec_b32 s6, s6 s_cbranch_execz .LBB0_12 v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mov_b32_e32 v4, v2 v_lshlrev_b64 v[1:2], 2, v[1:2] v_lshlrev_b64 v[3:4], 2, v[3:4] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v1, vcc_lo, s4, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v3, vcc_lo, s4, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo s_mov_b32 s4, exec_lo s_clause 0x1 global_load_b32 v1, v[1:2], off 
global_load_b32 v3, v[3:4], off v_lshl_add_u32 v2, v0, 2, 0 s_waitcnt vmcnt(0) v_cmpx_le_i32_e64 v1, v3 s_xor_b32 s4, exec_lo, s4 s_cbranch_execz .LBB0_9 ds_store_b32 v2, v3 .LBB0_9: s_and_not1_saveexec_b32 s4, s4 s_cbranch_execz .LBB0_11 ds_store_b32 v2, v1 .LBB0_11: s_or_b32 exec_lo, exec_lo, s4 .LBB0_12: s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 exec_lo, exec_lo, s6 s_cmp_lt_u32 s3, 2 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB0_18 v_lshl_add_u32 v1, v0, 2, 0 s_branch .LBB0_15 .p2align 6 .LBB0_14: s_or_b32 exec_lo, exec_lo, s5 s_cmp_lt_u32 s4, 4 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB0_18 .LBB0_15: s_mov_b32 s4, s3 s_lshr_b32 s3, s3, 1 s_mov_b32 s5, exec_lo v_cmpx_gt_u32_e64 s3, v0 s_cbranch_execz .LBB0_14 v_add_nc_u32_e32 v2, s3, v0 s_delay_alu instid0(VALU_DEP_1) v_lshl_add_u32 v2, v2, 2, 0 ds_load_b32 v3, v1 ds_load_b32 v2, v2 s_waitcnt lgkmcnt(0) v_cmp_lt_i32_e32 vcc_lo, v3, v2 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_14 ds_store_b32 v1, v2 s_branch .LBB0_14 .LBB0_18: s_mov_b32 s3, exec_lo v_cmpx_eq_u32_e32 0, v0 s_cbranch_execz .LBB0_20 v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0 s_load_b64 s[0:1], s[0:1], 0x8 s_mov_b32 s3, 0 s_delay_alu instid0(SALU_CYCLE_1) s_lshl_b64 s[2:3], s[2:3], 2 ds_load_b32 v0, v0 s_waitcnt lgkmcnt(0) s_add_u32 s0, s0, s2 s_addc_u32 s1, s1, s3 global_store_b32 v1, v0, s[0:1] .LBB0_20: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z26entrySearch_max_int_kernelPiS_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 
.amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z26entrySearch_max_int_kernelPiS_i, .Lfunc_end0-_Z26entrySearch_max_int_kernelPiS_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - 
.offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims - .offset: 144 .size: 4 .value_kind: hidden_dynamic_lds_size .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z26entrySearch_max_int_kernelPiS_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z26entrySearch_max_int_kernelPiS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h> #include "includes.h" __global__ void entrySearch_max_int_kernel(int *g_iarr, int *g_maxarr, int size) { // create shared memory extern __shared__ int sarr_int[]; // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x; if(i + blockDim.x < size) { if(g_iarr[i] > g_iarr[i + blockDim.x]) { sarr_int[tid] = g_iarr[i]; } else { sarr_int[tid] = g_iarr[i + blockDim.x]; } } else if (i < size) { sarr_int[tid] = g_iarr[i]; } else { sarr_int[tid] = INT_MIN; } __syncthreads(); // do comparison in shared mem for(unsigned int s=blockDim.x/2; s>0; s>>=1) { if(tid < s) { if(sarr_int[tid] < sarr_int[tid + s]) { sarr_int[tid] = sarr_int[tid + s]; } } __syncthreads(); } // write result for this block to global mem if(tid == 0) { g_maxarr[blockIdx.x] = sarr_int[0]; } }
.text .file "entrySearch_max_int_kernel.hip" .globl _Z41__device_stub__entrySearch_max_int_kernelPiS_i # -- Begin function _Z41__device_stub__entrySearch_max_int_kernelPiS_i .p2align 4, 0x90 .type _Z41__device_stub__entrySearch_max_int_kernelPiS_i,@function _Z41__device_stub__entrySearch_max_int_kernelPiS_i: # @_Z41__device_stub__entrySearch_max_int_kernelPiS_i .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z26entrySearch_max_int_kernelPiS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z41__device_stub__entrySearch_max_int_kernelPiS_i, .Lfunc_end0-_Z41__device_stub__entrySearch_max_int_kernelPiS_i .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z26entrySearch_max_int_kernelPiS_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 
0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z26entrySearch_max_int_kernelPiS_i,@object # @_Z26entrySearch_max_int_kernelPiS_i .section .rodata,"a",@progbits .globl _Z26entrySearch_max_int_kernelPiS_i .p2align 3, 0x0 _Z26entrySearch_max_int_kernelPiS_i: .quad _Z41__device_stub__entrySearch_max_int_kernelPiS_i .size _Z26entrySearch_max_int_kernelPiS_i, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z26entrySearch_max_int_kernelPiS_i" .size .L__unnamed_1, 36 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z41__device_stub__entrySearch_max_int_kernelPiS_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z26entrySearch_max_int_kernelPiS_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z26entrySearch_max_int_kernelPiS_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fe400078e00ff */ /*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */ /* 0x000e220000002500 */ /*0020*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */ /* 0x000fe20000000800 */ /*0030*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */ /* 0x000fe200078e00ff */ /*0040*/ USHF.L.U32 UR5, UR4, 0x1, URZ ; /* 0x0000000104057899 */ /* 0x000fe2000800063f */ /*0050*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */ /* 0x000e220000002100 */ /*0060*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */ /* 0x000fe20000000a00 */ /*0070*/ BSSY B0, 0x170 ; /* 0x000000f000007945 */ /* 0x000fe60003800000 */ /*0080*/ IMAD R0, R6, UR5, R7 ; /* 0x0000000506007c24 */ /* 0x001fc8000f8e0207 */ /*0090*/ IMAD.WIDE.U32 R2, R0.reuse, R5, c[0x0][0x160] ; /* 0x0000580000027625 */ /* 0x040fe200078e0005 */ /*00a0*/ IADD3 R4, R0, c[0x0][0x0], RZ ; /* 0x0000000000047a10 */ /* 0x000fc80007ffe0ff */ /*00b0*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */ /* 0x000fda0003f06070 */ /*00c0*/ @!P0 BRA 0x120 ; /* 0x0000005000008947 */ /* 0x000fea0003800000 */ /*00d0*/ ISETP.GE.U32.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */ /* 0x000fe20003f06070 */ /*00e0*/ IMAD.MOV.U32 R0, RZ, RZ, -0x80000000 ; /* 0x80000000ff007424 */ /* 0x000fd800078e00ff */ /*00f0*/ @P0 BRA 0x160 ; /* 0x0000006000000947 */ /* 0x000fea0003800000 */ /*0100*/ LDG.E R0, [R2.64] ; /* 0x0000000602007981 */ /* 0x000162000c1e1900 */ /*0110*/ BRA 0x160 ; /* 0x0000004000007947 */ /* 0x000fea0003800000 */ /*0120*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */ /* 0x000fe200078e0005 */ /*0130*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */ /* 0x000eaa000c1e1900 */ /*0140*/ LDG.E R4, [R4.64] ; /* 0x0000000604047981 */ /* 
0x000ea4000c1e1900 */ /*0150*/ IMNMX R0, R4, R3, !PT ; /* 0x0000000304007217 */ /* 0x004fe40007800200 */ /*0160*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0170*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */ /* 0x000fe20008011604 */ /*0180*/ STS [R7.X4], R0 ; /* 0x0000000007007388 */ /* 0x0203e80000004800 */ /*0190*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe20000010000 */ /*01a0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */ /* 0x000fda000bf05270 */ /*01b0*/ @!P0 BRA 0x2b0 ; /* 0x000000f000008947 */ /* 0x000fea0003800000 */ /*01c0*/ SHF.L.U32 R0, R7, 0x2, RZ ; /* 0x0000000207007819 */ /* 0x002fe200000006ff */ /*01d0*/ IMAD.U32 R3, RZ, RZ, UR4 ; /* 0x00000004ff037e24 */ /* 0x001fca000f8e00ff */ /*01e0*/ ISETP.GE.U32.AND P0, PT, R7, R3, PT ; /* 0x000000030700720c */ /* 0x000fe20003f06070 */ /*01f0*/ BSSY B0, 0x270 ; /* 0x0000007000007945 */ /* 0x000fd80003800000 */ /*0200*/ @P0 BRA 0x260 ; /* 0x0000005000000947 */ /* 0x001fea0003800000 */ /*0210*/ IMAD R4, R3, 0x4, R0 ; /* 0x0000000403047824 */ /* 0x000fe200078e0200 */ /*0220*/ LDS R2, [R7.X4] ; /* 0x0000000007027984 */ /* 0x000fea0000004800 */ /*0230*/ LDS R4, [R4] ; /* 0x0000000004047984 */ /* 0x000e240000000800 */ /*0240*/ ISETP.GE.AND P0, PT, R2, R4, PT ; /* 0x000000040200720c */ /* 0x001fda0003f06270 */ /*0250*/ @!P0 STS [R7.X4], R4 ; /* 0x0000000407008388 */ /* 0x0001e40000004800 */ /*0260*/ BSYNC B0 ; /* 0x0000000000007941 */ /* 0x000fea0003800000 */ /*0270*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */ /* 0x000fe20000011603 */ /*0280*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */ /* 0x000fe60000010000 */ /*0290*/ ISETP.NE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */ /* 0x000fda0003f05270 */ /*02a0*/ @P0 BRA 0x1e0 ; /* 0xffffff3000000947 */ /* 0x000fea000383ffff */ /*02b0*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */ /* 0x002fda0003f05270 */ /*02c0*/ @P0 EXIT ; /* 0x000000000000094d */ /* 
0x000fea0003800000 */ /*02d0*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */ /* 0x000e620000000800 */ /*02e0*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */ /* 0x001fd400000001ff */ /*02f0*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x168] ; /* 0x00005a0006027625 */ /* 0x000fca00078e0003 */ /*0300*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */ /* 0x002fe2000c101906 */ /*0310*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0320*/ BRA 0x320; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0330*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0340*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0350*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0360*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0370*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0380*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0390*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*03f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z26entrySearch_max_int_kernelPiS_i .globl _Z26entrySearch_max_int_kernelPiS_i .p2align 8 .type _Z26entrySearch_max_int_kernelPiS_i,@function _Z26entrySearch_max_int_kernelPiS_i: s_clause 0x2 s_load_b32 s3, s[0:1], 0x24 s_load_b32 s7, s[0:1], 0x10 s_load_b64 s[4:5], s[0:1], 0x0 s_mov_b32 s2, s15 s_waitcnt lgkmcnt(0) s_and_b32 s3, s3, 0xffff s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1) s_mul_i32 s6, s15, s3 v_lshl_add_u32 v1, s6, 1, v0 s_mov_b32 s6, exec_lo s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_add_nc_u32_e32 v3, s3, v1 v_cmpx_le_u32_e64 s7, v3 s_xor_b32 s6, exec_lo, s6 s_cbranch_execz .LBB0_6 v_cmp_le_u32_e32 vcc_lo, s7, v1 s_and_saveexec_b32 s7, vcc_lo s_delay_alu instid0(SALU_CYCLE_1) s_xor_b32 s7, exec_lo, s7 s_cbranch_execz .LBB0_3 v_lshl_add_u32 v1, v0, 2, 0 v_bfrev_b32_e32 v2, 1 ds_store_b32 v1, v2 .LBB0_3: s_and_not1_saveexec_b32 s7, s7 s_cbranch_execz .LBB0_5 v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) v_lshlrev_b64 v[1:2], 2, v[1:2] v_add_co_u32 v1, vcc_lo, s4, v1 s_delay_alu instid0(VALU_DEP_2) v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo global_load_b32 v1, v[1:2], off v_lshl_add_u32 v2, v0, 2, 0 s_waitcnt vmcnt(0) ds_store_b32 v2, v1 .LBB0_5: s_or_b32 exec_lo, exec_lo, s7 .LBB0_6: s_and_not1_saveexec_b32 s6, s6 s_cbranch_execz .LBB0_12 v_mov_b32_e32 v2, 0 s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2) v_mov_b32_e32 v4, v2 v_lshlrev_b64 v[1:2], 2, v[1:2] v_lshlrev_b64 v[3:4], 2, v[3:4] s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3) v_add_co_u32 v1, vcc_lo, s4, v1 v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4) v_add_co_u32 v3, vcc_lo, s4, v3 v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo s_mov_b32 s4, exec_lo s_clause 0x1 global_load_b32 v1, v[1:2], off 
global_load_b32 v3, v[3:4], off v_lshl_add_u32 v2, v0, 2, 0 s_waitcnt vmcnt(0) v_cmpx_le_i32_e64 v1, v3 s_xor_b32 s4, exec_lo, s4 s_cbranch_execz .LBB0_9 ds_store_b32 v2, v3 .LBB0_9: s_and_not1_saveexec_b32 s4, s4 s_cbranch_execz .LBB0_11 ds_store_b32 v2, v1 .LBB0_11: s_or_b32 exec_lo, exec_lo, s4 .LBB0_12: s_delay_alu instid0(SALU_CYCLE_1) s_or_b32 exec_lo, exec_lo, s6 s_cmp_lt_u32 s3, 2 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB0_18 v_lshl_add_u32 v1, v0, 2, 0 s_branch .LBB0_15 .p2align 6 .LBB0_14: s_or_b32 exec_lo, exec_lo, s5 s_cmp_lt_u32 s4, 4 s_waitcnt lgkmcnt(0) s_barrier buffer_gl0_inv s_cbranch_scc1 .LBB0_18 .LBB0_15: s_mov_b32 s4, s3 s_lshr_b32 s3, s3, 1 s_mov_b32 s5, exec_lo v_cmpx_gt_u32_e64 s3, v0 s_cbranch_execz .LBB0_14 v_add_nc_u32_e32 v2, s3, v0 s_delay_alu instid0(VALU_DEP_1) v_lshl_add_u32 v2, v2, 2, 0 ds_load_b32 v3, v1 ds_load_b32 v2, v2 s_waitcnt lgkmcnt(0) v_cmp_lt_i32_e32 vcc_lo, v3, v2 s_and_b32 exec_lo, exec_lo, vcc_lo s_cbranch_execz .LBB0_14 ds_store_b32 v1, v2 s_branch .LBB0_14 .LBB0_18: s_mov_b32 s3, exec_lo v_cmpx_eq_u32_e32 0, v0 s_cbranch_execz .LBB0_20 v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0 s_load_b64 s[0:1], s[0:1], 0x8 s_mov_b32 s3, 0 s_delay_alu instid0(SALU_CYCLE_1) s_lshl_b64 s[2:3], s[2:3], 2 ds_load_b32 v0, v0 s_waitcnt lgkmcnt(0) s_add_u32 s0, s0, s2 s_addc_u32 s1, s1, s3 global_store_b32 v1, v0, s[0:1] .LBB0_20: s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z26entrySearch_max_int_kernelPiS_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 280 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 
.amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 5 .amdhsa_next_free_sgpr 16 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z26entrySearch_max_int_kernelPiS_i, .Lfunc_end0-_Z26entrySearch_max_int_kernelPiS_i .section .AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value - .offset: 24 .size: 4 .value_kind: hidden_block_count_x - .offset: 28 .size: 4 .value_kind: hidden_block_count_y - .offset: 32 .size: 4 .value_kind: hidden_block_count_z - .offset: 36 .size: 2 .value_kind: hidden_group_size_x - .offset: 38 .size: 2 .value_kind: hidden_group_size_y - .offset: 40 .size: 2 .value_kind: hidden_group_size_z - .offset: 42 .size: 2 .value_kind: hidden_remainder_x - .offset: 44 .size: 2 .value_kind: hidden_remainder_y - 
.offset: 46 .size: 2 .value_kind: hidden_remainder_z - .offset: 64 .size: 8 .value_kind: hidden_global_offset_x - .offset: 72 .size: 8 .value_kind: hidden_global_offset_y - .offset: 80 .size: 8 .value_kind: hidden_global_offset_z - .offset: 88 .size: 2 .value_kind: hidden_grid_dims - .offset: 144 .size: 4 .value_kind: hidden_dynamic_lds_size .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 280 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z26entrySearch_max_int_kernelPiS_i .private_segment_fixed_size: 0 .sgpr_count: 18 .sgpr_spill_count: 0 .symbol: _Z26entrySearch_max_int_kernelPiS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 5 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000d6029_00000000-6_entrySearch_max_int_kernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z49__device_stub__Z26entrySearch_max_int_kernelPiS_iPiS_i .type _Z49__device_stub__Z26entrySearch_max_int_kernelPiS_iPiS_i, @function _Z49__device_stub__Z26entrySearch_max_int_kernelPiS_iPiS_i: .LFB2051: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z26entrySearch_max_int_kernelPiS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z49__device_stub__Z26entrySearch_max_int_kernelPiS_iPiS_i, .-_Z49__device_stub__Z26entrySearch_max_int_kernelPiS_iPiS_i .globl _Z26entrySearch_max_int_kernelPiS_i .type _Z26entrySearch_max_int_kernelPiS_i, @function _Z26entrySearch_max_int_kernelPiS_i: .LFB2052: 
.cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z49__device_stub__Z26entrySearch_max_int_kernelPiS_iPiS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size _Z26entrySearch_max_int_kernelPiS_i, .-_Z26entrySearch_max_int_kernelPiS_i .section .rodata.str1.8,"aMS",@progbits,1 .align 8 .LC0: .string "_Z26entrySearch_max_int_kernelPiS_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z26entrySearch_max_int_kernelPiS_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "entrySearch_max_int_kernel.hip" .globl _Z41__device_stub__entrySearch_max_int_kernelPiS_i # -- Begin function _Z41__device_stub__entrySearch_max_int_kernelPiS_i .p2align 4, 0x90 .type _Z41__device_stub__entrySearch_max_int_kernelPiS_i,@function _Z41__device_stub__entrySearch_max_int_kernelPiS_i: # @_Z41__device_stub__entrySearch_max_int_kernelPiS_i .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z26entrySearch_max_int_kernelPiS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z41__device_stub__entrySearch_max_int_kernelPiS_i, .Lfunc_end0-_Z41__device_stub__entrySearch_max_int_kernelPiS_i .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z26entrySearch_max_int_kernelPiS_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 
0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z26entrySearch_max_int_kernelPiS_i,@object # @_Z26entrySearch_max_int_kernelPiS_i .section .rodata,"a",@progbits .globl _Z26entrySearch_max_int_kernelPiS_i .p2align 3, 0x0 _Z26entrySearch_max_int_kernelPiS_i: .quad _Z41__device_stub__entrySearch_max_int_kernelPiS_i .size _Z26entrySearch_max_int_kernelPiS_i, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z26entrySearch_max_int_kernelPiS_i" .size .L__unnamed_1, 36 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z41__device_stub__entrySearch_max_int_kernelPiS_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z26entrySearch_max_int_kernelPiS_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <cstdio> #include <thrust/device_vector.h> #include <thrust/functional.h> #include <thrust/iterator/constant_iterator.h> #include <vector> int main() { std::vector<double> data; double temp; while (scanf("%lf", &temp) > 0) { data.push_back(temp); } thrust::device_vector<double> gpu_data(data); thrust::device_vector<double> var(data.size()); double mean = (double) thrust::reduce(gpu_data.begin(), gpu_data.end(), (double) 0, thrust::plus<double>()) / data.size(); thrust::transform(gpu_data.begin(), gpu_data.end(), thrust::constant_iterator<double>(mean), var.begin(), thrust::minus<double>()); thrust::transform(var.begin(), var.end(), var.begin(), var.begin(), thrust::multiplies<double>()); double variance = (double) thrust::reduce(var.begin(), var.end(), (double) 0, thrust::plus<double>()) / data.size(); printf("Mean: %lf Variance: %lf Size: %d\n", mean, variance, data.size()); }
#include <hip/hip_runtime.h> #include <cstdio> #include <thrust/device_vector.h> #include <thrust/functional.h> #include <thrust/iterator/constant_iterator.h> #include <vector> int main() { std::vector<double> data; double temp; while (scanf("%lf", &temp) > 0) { data.push_back(temp); } thrust::device_vector<double> gpu_data(data); thrust::device_vector<double> var(data.size()); double mean = (double) thrust::reduce(gpu_data.begin(), gpu_data.end(), (double) 0, thrust::plus<double>()) / data.size(); thrust::transform(gpu_data.begin(), gpu_data.end(), thrust::constant_iterator<double>(mean), var.begin(), thrust::minus<double>()); thrust::transform(var.begin(), var.end(), var.begin(), var.begin(), thrust::multiplies<double>()); double variance = (double) thrust::reduce(var.begin(), var.end(), (double) 0, thrust::plus<double>()) / data.size(); printf("Mean: %lf Variance: %lf Size: %d\n", mean, variance, data.size()); }
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
#include <stdio.h> #include <cuda.h> // include cuda-api header __global__ void helloFromGPU(void){ printf("Hello from GPU! %d\n", threadIdx.x); // accessing thread id } int main(void){ printf("Hello! from CPU\n"); helloFromGPU <<< 1,10 >>>(); cudaDeviceReset(); }
code for sm_80 Function : _Z12helloFromGPUv .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */ /* 0x000fc800078e00ff */ /*0010*/ S2R R8, SR_TID.X ; /* 0x0000000000087919 */ /* 0x000e220000002100 */ /*0020*/ IADD3 R1, R1, -0x8, RZ ; /* 0xfffffff801017810 */ /* 0x000fe20007ffe0ff */ /*0030*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */ /* 0x000fe200078e00ff */ /*0040*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe20000000f00 */ /*0050*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */ /* 0x000fe200078e00ff */ /*0060*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */ /* 0x000fe40007f1e0ff */ /*0070*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */ /* 0x0002a60000000a00 */ /*0080*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */ /* 0x000fe200000e06ff */ /*0090*/ STL [R1], R8 ; /* 0x0000000801007387 */ /* 0x0013e80000100800 */ /*00a0*/ LEPC R8 ; /* 0x000000000008734e */ /* 0x002fc60000000000 */ /*00b0*/ MOV R11, 0x120 ; /* 0x00000120000b7802 */ /* 0x000fe40000000f00 */ /*00c0*/ MOV R20, 0xa0 ; /* 0x000000a000147802 */ /* 0x000fc40000000f00 */ /*00d0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */ /* 0x000fe40000000f00 */ /*00e0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */ /* 0x000fe40000000f00 */ /*00f0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */ /* 0x000fc8000791e108 */ /*0100*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */ /* 0x000fc800007e2509 */ /*0110*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */ /* 0x004fea0003c00000 */ /*0120*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*0130*/ BRA 0x130; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ 
/*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0180*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0190*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01a0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01b0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01c0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01d0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*01f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
#include <stdio.h> #include <cuda.h> // include cuda-api header __global__ void helloFromGPU(void){ printf("Hello from GPU! %d\n", threadIdx.x); // accessing thread id } int main(void){ printf("Hello! from CPU\n"); helloFromGPU <<< 1,10 >>>(); cudaDeviceReset(); }
.file "tmpxft_000c0eae_00000000-6_threadID.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z31__device_stub__Z12helloFromGPUvv .type _Z31__device_stub__Z12helloFromGPUvv, @function _Z31__device_stub__Z12helloFromGPUvv: .LFB2082: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z12helloFromGPUv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z31__device_stub__Z12helloFromGPUvv, .-_Z31__device_stub__Z12helloFromGPUvv .globl _Z12helloFromGPUv .type _Z12helloFromGPUv, @function _Z12helloFromGPUv: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z12helloFromGPUvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z12helloFromGPUv, .-_Z12helloFromGPUv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Hello! 
from CPU\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $10, 20(%rsp) movl $1, 24(%rsp) movl $1, 8(%rsp) movl $1, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L12: call cudaDeviceReset@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state call _Z31__device_stub__Z12helloFromGPUvv jmp .L12 .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC1: .string "_Z12helloFromGPUv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z12helloFromGPUv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section 
.note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
#include <stdio.h> #include <cuda.h> // include cuda-api header __global__ void helloFromGPU(void){ printf("Hello from GPU! %d\n", threadIdx.x); // accessing thread id } int main(void){ printf("Hello! from CPU\n"); helloFromGPU <<< 1,10 >>>(); cudaDeviceReset(); }
#include <stdio.h> #include <hip/hip_runtime.h> // include cuda-api header __global__ void helloFromGPU(void){ printf("Hello from GPU! %d\n", threadIdx.x); // accessing thread id } int main(void){ printf("Hello! from CPU\n"); helloFromGPU <<< 1,10 >>>(); hipDeviceReset(); }
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <stdio.h> #include <hip/hip_runtime.h> // include cuda-api header __global__ void helloFromGPU(void){ printf("Hello from GPU! %d\n", threadIdx.x); // accessing thread id } int main(void){ printf("Hello! from CPU\n"); helloFromGPU <<< 1,10 >>>(); hipDeviceReset(); }
.text .file "threadID.hip" .globl _Z27__device_stub__helloFromGPUv # -- Begin function _Z27__device_stub__helloFromGPUv .p2align 4, 0x90 .type _Z27__device_stub__helloFromGPUv,@function _Z27__device_stub__helloFromGPUv: # @_Z27__device_stub__helloFromGPUv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z12helloFromGPUv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z27__device_stub__helloFromGPUv, .Lfunc_end0-_Z27__device_stub__helloFromGPUv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 movl $.Lstr, %edi callq puts@PLT movabsq $4294967297, %rdi # imm = 0x100000001 leaq 9(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z12helloFromGPUv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceReset xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: 
movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12helloFromGPUv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z12helloFromGPUv,@object # @_Z12helloFromGPUv .section .rodata,"a",@progbits .globl _Z12helloFromGPUv .p2align 3, 0x0 _Z12helloFromGPUv: .quad _Z27__device_stub__helloFromGPUv .size _Z12helloFromGPUv, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z12helloFromGPUv" .size .L__unnamed_1, 18 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Hello! 
from CPU" .size .Lstr, 16 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__helloFromGPUv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12helloFromGPUv .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000c0eae_00000000-6_threadID.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2060: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2060: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z31__device_stub__Z12helloFromGPUvv .type _Z31__device_stub__Z12helloFromGPUvv, @function _Z31__device_stub__Z12helloFromGPUvv: .LFB2082: .cfi_startproc endbr64 subq $88, %rsp .cfi_def_cfa_offset 96 movq %fs:40, %rax movq %rax, 72(%rsp) xorl %eax, %eax movl $1, 16(%rsp) movl $1, 20(%rsp) movl $1, 24(%rsp) movl $1, 28(%rsp) movl $1, 32(%rsp) movl $1, 36(%rsp) leaq 8(%rsp), %rcx movq %rsp, %rdx leaq 28(%rsp), %rsi leaq 16(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 72(%rsp), %rax subq %fs:40, %rax jne .L8 addq $88, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 8(%rsp) .cfi_def_cfa_offset 104 pushq 8(%rsp) .cfi_def_cfa_offset 112 leaq 80(%rsp), %r9 movq 44(%rsp), %rcx movl 52(%rsp), %r8d movq 32(%rsp), %rsi movl 40(%rsp), %edx leaq _Z12helloFromGPUv(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 96 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2082: .size _Z31__device_stub__Z12helloFromGPUvv, .-_Z31__device_stub__Z12helloFromGPUvv .globl _Z12helloFromGPUv .type _Z12helloFromGPUv, @function _Z12helloFromGPUv: .LFB2083: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z12helloFromGPUvv addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2083: .size _Z12helloFromGPUv, .-_Z12helloFromGPUv .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "Hello! 
from CPU\n" .text .globl main .type main, @function main: .LFB2057: .cfi_startproc endbr64 subq $40, %rsp .cfi_def_cfa_offset 48 leaq .LC0(%rip), %rsi movl $2, %edi movl $0, %eax call __printf_chk@PLT movl $10, 20(%rsp) movl $1, 24(%rsp) movl $1, 8(%rsp) movl $1, 12(%rsp) movl $0, %r9d movl $0, %r8d movq 20(%rsp), %rdx movl $1, %ecx movq 8(%rsp), %rdi movl $1, %esi call __cudaPushCallConfiguration@PLT testl %eax, %eax je .L14 .L12: call cudaDeviceReset@PLT movl $0, %eax addq $40, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L14: .cfi_restore_state call _Z31__device_stub__Z12helloFromGPUvv jmp .L12 .cfi_endproc .LFE2057: .size main, .-main .section .rodata.str1.1 .LC1: .string "_Z12helloFromGPUv" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2085: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC1(%rip), %rdx movq %rdx, %rcx leaq _Z12helloFromGPUv(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2085: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section 
.note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
.text .file "threadID.hip" .globl _Z27__device_stub__helloFromGPUv # -- Begin function _Z27__device_stub__helloFromGPUv .p2align 4, 0x90 .type _Z27__device_stub__helloFromGPUv,@function _Z27__device_stub__helloFromGPUv: # @_Z27__device_stub__helloFromGPUv .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z12helloFromGPUv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $72, %rsp .cfi_adjust_cfa_offset -72 retq .Lfunc_end0: .size _Z27__device_stub__helloFromGPUv, .Lfunc_end0-_Z27__device_stub__helloFromGPUv .cfi_endproc # -- End function .globl main # -- Begin function main .p2align 4, 0x90 .type main,@function main: # @main .cfi_startproc # %bb.0: subq $56, %rsp .cfi_def_cfa_offset 64 movl $.Lstr, %edi callq puts@PLT movabsq $4294967297, %rdi # imm = 0x100000001 leaq 9(%rdi), %rdx movl $1, %esi movl $1, %ecx xorl %r8d, %r8d xorl %r9d, %r9d callq __hipPushCallConfiguration testl %eax, %eax jne .LBB1_2 # %bb.1: leaq 32(%rsp), %rdi leaq 16(%rsp), %rsi leaq 8(%rsp), %rdx movq %rsp, %rcx callq __hipPopCallConfiguration movq 32(%rsp), %rsi movl 40(%rsp), %edx movq 16(%rsp), %rcx movl 24(%rsp), %r8d leaq 48(%rsp), %r9 movl $_Z12helloFromGPUv, %edi pushq (%rsp) .cfi_adjust_cfa_offset 8 pushq 16(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $16, %rsp .cfi_adjust_cfa_offset -16 .LBB1_2: callq hipDeviceReset xorl %eax, %eax addq $56, %rsp .cfi_def_cfa_offset 8 retq .Lfunc_end1: .size main, .Lfunc_end1-main .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB2_2 # %bb.1: 
movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB2_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z12helloFromGPUv, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end2: .size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi testq %rdi, %rdi je .LBB3_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB3_2: retq .Lfunc_end3: .size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor .cfi_endproc # -- End function .type _Z12helloFromGPUv,@object # @_Z12helloFromGPUv .section .rodata,"a",@progbits .globl _Z12helloFromGPUv .p2align 3, 0x0 _Z12helloFromGPUv: .quad _Z27__device_stub__helloFromGPUv .size _Z12helloFromGPUv, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z12helloFromGPUv" .size .L__unnamed_1, 18 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .type .Lstr,@object # @str .section .rodata.str1.1,"aMS",@progbits,1 .Lstr: .asciz "Hello! 
from CPU" .size .Lstr, 16 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z27__device_stub__helloFromGPUv .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z12helloFromGPUv .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
//pass //--blockDim=32 --gridDim=1 #include <cuda.h> __global__ void test_Prog(int *A,int *B, int N) { const int tid = threadIdx.x; int tmp=A[tid+1]; int tmp2=B[tid+1]; B[tid]=tmp2+tmp; A[tid]=tmp2-tmp; }
code for sm_80 Function : _Z9test_ProgPiS_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */ /* 0x000e220000002100 */ /*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0040*/ IMAD.WIDE R2, R4, R5, c[0x0][0x160] ; /* 0x0000580004027625 */ /* 0x001fc800078e0205 */ /*0050*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */ /* 0x000fe200078e0205 */ /*0060*/ LDG.E R0, [R2.64+0x4] ; /* 0x0000040402007981 */ /* 0x000ea8000c1e1900 */ /*0070*/ LDG.E R7, [R4.64+0x4] ; /* 0x0000040404077981 */ /* 0x000ea4000c1e1900 */ /*0080*/ IADD3 R9, R0.reuse, R7.reuse, RZ ; /* 0x0000000700097210 */ /* 0x0c4fe40007ffe0ff */ /*0090*/ IADD3 R7, -R0, R7, RZ ; /* 0x0000000700077210 */ /* 0x000fc60007ffe1ff */ /*00a0*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */ /* 0x000fe8000c101904 */ /*00b0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x000fe2000c101904 */ /*00c0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
//pass //--blockDim=32 --gridDim=1 #include <cuda.h> __global__ void test_Prog(int *A,int *B, int N) { const int tid = threadIdx.x; int tmp=A[tid+1]; int tmp2=B[tid+1]; B[tid]=tmp2+tmp; A[tid]=tmp2-tmp; }
.file "tmpxft_0011ef86_00000000-6_kernel.cudafe1.cpp" .text #APP #NO_APP .type _ZL26__cudaUnregisterBinaryUtilv, @function _ZL26__cudaUnregisterBinaryUtilv: .LFB2029: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaUnregisterFatBinary@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2029: .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv .globl _Z31__device_stub__Z9test_ProgPiS_iPiS_i .type _Z31__device_stub__Z9test_ProgPiS_iPiS_i, @function _Z31__device_stub__Z9test_ProgPiS_iPiS_i: .LFB2051: .cfi_startproc endbr64 subq $136, %rsp .cfi_def_cfa_offset 144 movq %rdi, 24(%rsp) movq %rsi, 16(%rsp) movl %edx, 12(%rsp) movq %fs:40, %rax movq %rax, 120(%rsp) xorl %eax, %eax leaq 24(%rsp), %rax movq %rax, 96(%rsp) leaq 16(%rsp), %rax movq %rax, 104(%rsp) leaq 12(%rsp), %rax movq %rax, 112(%rsp) movl $1, 48(%rsp) movl $1, 52(%rsp) movl $1, 56(%rsp) movl $1, 60(%rsp) movl $1, 64(%rsp) movl $1, 68(%rsp) leaq 40(%rsp), %rcx leaq 32(%rsp), %rdx leaq 60(%rsp), %rsi leaq 48(%rsp), %rdi call __cudaPopCallConfiguration@PLT testl %eax, %eax je .L7 .L3: movq 120(%rsp), %rax subq %fs:40, %rax jne .L8 addq $136, %rsp .cfi_remember_state .cfi_def_cfa_offset 8 ret .L7: .cfi_restore_state pushq 40(%rsp) .cfi_def_cfa_offset 152 pushq 40(%rsp) .cfi_def_cfa_offset 160 leaq 112(%rsp), %r9 movq 76(%rsp), %rcx movl 84(%rsp), %r8d movq 64(%rsp), %rsi movl 72(%rsp), %edx leaq _Z9test_ProgPiS_i(%rip), %rdi call cudaLaunchKernel@PLT addq $16, %rsp .cfi_def_cfa_offset 144 jmp .L3 .L8: call __stack_chk_fail@PLT .cfi_endproc .LFE2051: .size _Z31__device_stub__Z9test_ProgPiS_iPiS_i, .-_Z31__device_stub__Z9test_ProgPiS_iPiS_i .globl _Z9test_ProgPiS_i .type _Z9test_ProgPiS_i, @function _Z9test_ProgPiS_i: .LFB2052: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 call _Z31__device_stub__Z9test_ProgPiS_iPiS_i addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2052: .size 
_Z9test_ProgPiS_i, .-_Z9test_ProgPiS_i .section .rodata.str1.1,"aMS",@progbits,1 .LC0: .string "_Z9test_ProgPiS_i" .text .type _ZL24__sti____cudaRegisterAllv, @function _ZL24__sti____cudaRegisterAllv: .LFB2054: .cfi_startproc endbr64 subq $8, %rsp .cfi_def_cfa_offset 16 leaq _ZL15__fatDeviceText(%rip), %rdi call __cudaRegisterFatBinary@PLT movq %rax, %rdi movq %rax, _ZL20__cudaFatCubinHandle(%rip) pushq $0 .cfi_def_cfa_offset 24 pushq $0 .cfi_def_cfa_offset 32 pushq $0 .cfi_def_cfa_offset 40 pushq $0 .cfi_def_cfa_offset 48 movl $0, %r9d movl $-1, %r8d leaq .LC0(%rip), %rdx movq %rdx, %rcx leaq _Z9test_ProgPiS_i(%rip), %rsi call __cudaRegisterFunction@PLT addq $32, %rsp .cfi_def_cfa_offset 16 movq _ZL20__cudaFatCubinHandle(%rip), %rdi call __cudaRegisterFatBinaryEnd@PLT leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi call atexit@PLT addq $8, %rsp .cfi_def_cfa_offset 8 ret .cfi_endproc .LFE2054: .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv .section .init_array,"aw" .align 8 .quad _ZL24__sti____cudaRegisterAllv .section .nvFatBinSegment,"aw" .align 8 .type _ZL15__fatDeviceText, @object .size _ZL15__fatDeviceText, 24 _ZL15__fatDeviceText: .long 1180844977 .long 1 .quad fatbinData .quad 0 .local _ZL20__cudaFatCubinHandle .comm _ZL20__cudaFatCubinHandle,8,8 .ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0" .section .note.GNU-stack,"",@progbits .section .note.gnu.property,"a" .align 8 .long 1f - 0f .long 4f - 1f .long 5 0: .string "GNU" 1: .align 8 .long 0xc0000002 .long 3f - 2f 2: .long 0x3 3: .align 8 4:
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
//pass //--blockDim=32 --gridDim=1 #include <cuda.h> __global__ void test_Prog(int *A,int *B, int N) { const int tid = threadIdx.x; int tmp=A[tid+1]; int tmp2=B[tid+1]; B[tid]=tmp2+tmp; A[tid]=tmp2-tmp; }
//pass //--blockDim=32 --gridDim=1 #include <hip/hip_runtime.h> __global__ void test_Prog(int *A,int *B, int N) { const int tid = threadIdx.x; int tmp=A[tid+1]; int tmp2=B[tid+1]; B[tid]=tmp2+tmp; A[tid]=tmp2-tmp; }
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
//pass //--blockDim=32 --gridDim=1 #include <hip/hip_runtime.h> __global__ void test_Prog(int *A,int *B, int N) { const int tid = threadIdx.x; int tmp=A[tid+1]; int tmp2=B[tid+1]; B[tid]=tmp2+tmp; A[tid]=tmp2-tmp; }
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z9test_ProgPiS_i .globl _Z9test_ProgPiS_i .p2align 8 .type _Z9test_ProgPiS_i,@function _Z9test_ProgPiS_i: s_load_b128 s[0:3], s[0:1], 0x0 v_lshlrev_b32_e32 v0, 2, v0 s_delay_alu instid0(VALU_DEP_1) v_add_nc_u32_e32 v1, 4, v0 s_waitcnt lgkmcnt(0) s_clause 0x1 global_load_b32 v2, v1, s[0:1] global_load_b32 v1, v1, s[2:3] s_waitcnt vmcnt(0) v_add_nc_u32_e32 v3, v1, v2 v_sub_nc_u32_e32 v1, v1, v2 s_clause 0x1 global_store_b32 v0, v3, s[2:3] global_store_b32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z9test_ProgPiS_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 20 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 4 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z9test_ProgPiS_i, .Lfunc_end0-_Z9test_ProgPiS_i .section 
.AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 20 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z9test_ProgPiS_i .private_segment_fixed_size: 0 .sgpr_count: 4 .sgpr_spill_count: 0 .symbol: _Z9test_ProgPiS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
//pass
//--blockDim=32 --gridDim=1

#include <hip/hip_runtime.h>

// Verification test kernel: each thread reads the element one slot to its
// right in both A and B, then writes their sum into B and their difference
// into A at its own slot:
//   B[tid] = B[tid+1] + A[tid+1]
//   A[tid] = B[tid+1] - A[tid+1]
// Reads happen before either write for a given thread, but there is no
// __syncthreads() between threads; the harness above (--blockDim=32
// --gridDim=1) is what the "//pass" verdict was established against.
// NOTE(review): parameter N is accepted but never used, and the tid+1 reads
// are unguarded — both arrays are assumed to hold at least blockDim.x + 1
// ints (33 under this harness). TODO confirm against the launching host code.
__global__ void test_Prog(int *A,int *B, int N)
{
  const int tid = threadIdx.x;  // flat 1-D thread index within the block

  int tmp=A[tid+1];   // right neighbour of A
  int tmp2=B[tid+1];  // right neighbour of B

  B[tid]=tmp2+tmp;    // sum stored one slot left in B
  A[tid]=tmp2-tmp;    // difference stored one slot left in A
}
.text .file "kernel.hip" .globl _Z24__device_stub__test_ProgPiS_i # -- Begin function _Z24__device_stub__test_ProgPiS_i .p2align 4, 0x90 .type _Z24__device_stub__test_ProgPiS_i,@function _Z24__device_stub__test_ProgPiS_i: # @_Z24__device_stub__test_ProgPiS_i .cfi_startproc # %bb.0: subq $104, %rsp .cfi_def_cfa_offset 112 movq %rdi, 72(%rsp) movq %rsi, 64(%rsp) movl %edx, 12(%rsp) leaq 72(%rsp), %rax movq %rax, 80(%rsp) leaq 64(%rsp), %rax movq %rax, 88(%rsp) leaq 12(%rsp), %rax movq %rax, 96(%rsp) leaq 48(%rsp), %rdi leaq 32(%rsp), %rsi leaq 24(%rsp), %rdx leaq 16(%rsp), %rcx callq __hipPopCallConfiguration movq 48(%rsp), %rsi movl 56(%rsp), %edx movq 32(%rsp), %rcx movl 40(%rsp), %r8d leaq 80(%rsp), %r9 movl $_Z9test_ProgPiS_i, %edi pushq 16(%rsp) .cfi_adjust_cfa_offset 8 pushq 32(%rsp) .cfi_adjust_cfa_offset 8 callq hipLaunchKernel addq $120, %rsp .cfi_adjust_cfa_offset -120 retq .Lfunc_end0: .size _Z24__device_stub__test_ProgPiS_i, .Lfunc_end0-_Z24__device_stub__test_ProgPiS_i .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_ctor .type __hip_module_ctor,@function __hip_module_ctor: # @__hip_module_ctor .cfi_startproc # %bb.0: subq $40, %rsp .cfi_def_cfa_offset 48 cmpq $0, __hip_gpubin_handle(%rip) jne .LBB1_2 # %bb.1: movl $__hip_fatbin_wrapper, %edi callq __hipRegisterFatBinary movq %rax, __hip_gpubin_handle(%rip) .LBB1_2: movq __hip_gpubin_handle(%rip), %rdi xorps %xmm0, %xmm0 movups %xmm0, 16(%rsp) movups %xmm0, (%rsp) movl $_Z9test_ProgPiS_i, %esi movl $.L__unnamed_1, %edx movl $.L__unnamed_1, %ecx movl $-1, %r8d xorl %r9d, %r9d callq __hipRegisterFunction movl $__hip_module_dtor, %edi addq $40, %rsp .cfi_def_cfa_offset 8 jmp atexit # TAILCALL .Lfunc_end1: .size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor .cfi_endproc # -- End function .p2align 4, 0x90 # -- Begin function __hip_module_dtor .type __hip_module_dtor,@function __hip_module_dtor: # @__hip_module_dtor .cfi_startproc # %bb.0: movq __hip_gpubin_handle(%rip), %rdi 
testq %rdi, %rdi je .LBB2_2 # %bb.1: pushq %rax .cfi_def_cfa_offset 16 callq __hipUnregisterFatBinary movq $0, __hip_gpubin_handle(%rip) addq $8, %rsp .cfi_def_cfa_offset 8 .LBB2_2: retq .Lfunc_end2: .size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor .cfi_endproc # -- End function .type _Z9test_ProgPiS_i,@object # @_Z9test_ProgPiS_i .section .rodata,"a",@progbits .globl _Z9test_ProgPiS_i .p2align 3, 0x0 _Z9test_ProgPiS_i: .quad _Z24__device_stub__test_ProgPiS_i .size _Z9test_ProgPiS_i, 8 .type .L__unnamed_1,@object # @0 .section .rodata.str1.1,"aMS",@progbits,1 .L__unnamed_1: .asciz "_Z9test_ProgPiS_i" .size .L__unnamed_1, 18 .type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper .section .hipFatBinSegment,"a",@progbits .p2align 3, 0x0 __hip_fatbin_wrapper: .long 1212764230 # 0x48495046 .long 1 # 0x1 .quad __hip_fatbin .quad 0 .size __hip_fatbin_wrapper, 24 .type __hip_gpubin_handle,@object # @__hip_gpubin_handle .local __hip_gpubin_handle .comm __hip_gpubin_handle,8,8 .section .init_array,"aw",@init_array .p2align 3, 0x0 .quad __hip_module_ctor .type __hip_cuid_,@object # @__hip_cuid_ .bss .globl __hip_cuid_ __hip_cuid_: .byte 0 # 0x0 .size __hip_cuid_, 1 .section ".linker-options","e",@llvm_linker_options .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym _Z24__device_stub__test_ProgPiS_i .addrsig_sym __hip_module_ctor .addrsig_sym __hip_module_dtor .addrsig_sym _Z9test_ProgPiS_i .addrsig_sym __hip_fatbin .addrsig_sym __hip_fatbin_wrapper .addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80 Function : _Z9test_ProgPiS_i .headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)" /*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */ /* 0x000fe40000000f00 */ /*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */ /* 0x000e220000002100 */ /*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */ /* 0x000fe200000001ff */ /*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */ /* 0x000fd20000000a00 */ /*0040*/ IMAD.WIDE R2, R4, R5, c[0x0][0x160] ; /* 0x0000580004027625 */ /* 0x001fc800078e0205 */ /*0050*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */ /* 0x000fe200078e0205 */ /*0060*/ LDG.E R0, [R2.64+0x4] ; /* 0x0000040402007981 */ /* 0x000ea8000c1e1900 */ /*0070*/ LDG.E R7, [R4.64+0x4] ; /* 0x0000040404077981 */ /* 0x000ea4000c1e1900 */ /*0080*/ IADD3 R9, R0.reuse, R7.reuse, RZ ; /* 0x0000000700097210 */ /* 0x0c4fe40007ffe0ff */ /*0090*/ IADD3 R7, -R0, R7, RZ ; /* 0x0000000700077210 */ /* 0x000fc60007ffe1ff */ /*00a0*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */ /* 0x000fe8000c101904 */ /*00b0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */ /* 0x000fe2000c101904 */ /*00c0*/ EXIT ; /* 0x000000000000794d */ /* 0x000fea0003800000 */ /*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */ /* 0x000fc0000383ffff */ /*00e0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*00f0*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0100*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0110*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0120*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0130*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0140*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0150*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0160*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ /*0170*/ NOP; /* 0x0000000000007918 */ /* 0x000fc00000000000 */ ..........
.text .amdgcn_target "amdgcn-amd-amdhsa--gfx1100" .protected _Z9test_ProgPiS_i .globl _Z9test_ProgPiS_i .p2align 8 .type _Z9test_ProgPiS_i,@function _Z9test_ProgPiS_i: s_load_b128 s[0:3], s[0:1], 0x0 v_lshlrev_b32_e32 v0, 2, v0 s_delay_alu instid0(VALU_DEP_1) v_add_nc_u32_e32 v1, 4, v0 s_waitcnt lgkmcnt(0) s_clause 0x1 global_load_b32 v2, v1, s[0:1] global_load_b32 v1, v1, s[2:3] s_waitcnt vmcnt(0) v_add_nc_u32_e32 v3, v1, v2 v_sub_nc_u32_e32 v1, v1, v2 s_clause 0x1 global_store_b32 v0, v3, s[2:3] global_store_b32 v0, v1, s[0:1] s_nop 0 s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) s_endpgm .section .rodata,"a",@progbits .p2align 6, 0x0 .amdhsa_kernel _Z9test_ProgPiS_i .amdhsa_group_segment_fixed_size 0 .amdhsa_private_segment_fixed_size 0 .amdhsa_kernarg_size 20 .amdhsa_user_sgpr_count 15 .amdhsa_user_sgpr_dispatch_ptr 0 .amdhsa_user_sgpr_queue_ptr 0 .amdhsa_user_sgpr_kernarg_segment_ptr 1 .amdhsa_user_sgpr_dispatch_id 0 .amdhsa_user_sgpr_private_segment_size 0 .amdhsa_wavefront_size32 1 .amdhsa_uses_dynamic_stack 0 .amdhsa_enable_private_segment 0 .amdhsa_system_sgpr_workgroup_id_x 1 .amdhsa_system_sgpr_workgroup_id_y 0 .amdhsa_system_sgpr_workgroup_id_z 0 .amdhsa_system_sgpr_workgroup_info 0 .amdhsa_system_vgpr_workitem_id 0 .amdhsa_next_free_vgpr 4 .amdhsa_next_free_sgpr 4 .amdhsa_reserve_vcc 0 .amdhsa_float_round_mode_32 0 .amdhsa_float_round_mode_16_64 0 .amdhsa_float_denorm_mode_32 3 .amdhsa_float_denorm_mode_16_64 3 .amdhsa_dx10_clamp 1 .amdhsa_ieee_mode 1 .amdhsa_fp16_overflow 0 .amdhsa_workgroup_processor_mode 1 .amdhsa_memory_ordered 1 .amdhsa_forward_progress 0 .amdhsa_shared_vgpr_count 0 .amdhsa_exception_fp_ieee_invalid_op 0 .amdhsa_exception_fp_denorm_src 0 .amdhsa_exception_fp_ieee_div_zero 0 .amdhsa_exception_fp_ieee_overflow 0 .amdhsa_exception_fp_ieee_underflow 0 .amdhsa_exception_fp_ieee_inexact 0 .amdhsa_exception_int_div_zero 0 .end_amdhsa_kernel .text .Lfunc_end0: .size _Z9test_ProgPiS_i, .Lfunc_end0-_Z9test_ProgPiS_i .section 
.AMDGPU.csdata,"",@progbits .text .p2alignl 7, 3214868480 .fill 96, 4, 3214868480 .type __hip_cuid_,@object .section .bss,"aw",@nobits .globl __hip_cuid_ __hip_cuid_: .byte 0 .size __hip_cuid_, 1 .ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)" .section ".note.GNU-stack","",@progbits .addrsig .addrsig_sym __hip_cuid_ .amdgpu_metadata --- amdhsa.kernels: - .args: - .address_space: global .offset: 0 .size: 8 .value_kind: global_buffer - .address_space: global .offset: 8 .size: 8 .value_kind: global_buffer - .offset: 16 .size: 4 .value_kind: by_value .group_segment_fixed_size: 0 .kernarg_segment_align: 8 .kernarg_segment_size: 20 .language: OpenCL C .language_version: - 2 - 0 .max_flat_workgroup_size: 1024 .name: _Z9test_ProgPiS_i .private_segment_fixed_size: 0 .sgpr_count: 4 .sgpr_spill_count: 0 .symbol: _Z9test_ProgPiS_i.kd .uniform_work_group_size: 1 .uses_dynamic_stack: false .vgpr_count: 4 .vgpr_spill_count: 0 .wavefront_size: 32 .workgroup_processor_mode: 1 amdhsa.target: amdgcn-amd-amdhsa--gfx1100 amdhsa.version: - 1 - 2 ... .end_amdgpu_metadata