system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include<stdio.h>
#include<iostream>
#include<hip/hip_runtime.h>
int main(int argc, char* argv[]){
if(argc!=3){
std::cout<<"Usage: "<<argv[0]<<" Numblocks BlockDim\n";
return 0;
}
int nBlocks= atoi(argv[1]);
int bDim = atoi(argv[2]);
if(bDim>1024){
std::cout<<"BlockDim should be less than or equal to 1024\n";
return 0;
}
std::cout<<"Lets do this!!!\n";
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include<stdio.h>
#include<iostream>
#include<hip/hip_runtime.h>
int main(int argc, char* argv[]){
if(argc!=3){
std::cout<<"Usage: "<<argv[0]<<" Numblocks BlockDim\n";
return 0;
}
int nBlocks= atoi(argv[1]);
int bDim = atoi(argv[2]);
if(bDim>1024){
std::cout<<"BlockDim should be less than or equal to 1024\n";
return 0;
}
std::cout<<"Lets do this!!!\n";
} | .text
.file "example1.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
movq %rsi, %rbx
cmpl $3, %edi
jne .LBB0_1
# %bb.5:
movq 16(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl $_ZSt4cout, %edi
cmpl $1025, %eax # imm = 0x401
jl .LBB0_7
# %bb.6:
movl $.L.str.2, %esi
movl $46, %edx
jmp .LBB0_8
.LBB0_1:
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%rbx), %rbx
testq %rbx, %rbx
je .LBB0_2
# %bb.3:
movq %rbx, %rdi
callq strlen
movl $_ZSt4cout, %edi
movq %rbx, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB0_4
.LBB0_7:
movl $.L.str.3, %esi
movl $16, %edx
jmp .LBB0_8
.LBB0_2:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rax), %rdi
movl _ZSt4cout+32(%rax), %esi
orl $1, %esi
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.LBB0_4: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $20, %edx
.LBB0_8:
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
xorl %eax, %eax
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Usage: "
.size .L.str, 8
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " Numblocks BlockDim\n"
.size .L.str.1, 21
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "BlockDim should be less than or equal to 1024\n"
.size .L.str.2, 47
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Lets do this!!!\n"
.size .L.str.3, 17
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0007504c_00000000-6_example1.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Usage: "
.LC1:
.string " Numblocks BlockDim\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "BlockDim should be less than or equal to 1024\n"
.section .rodata.str1.1
.LC3:
.string "Lets do this!!!\n"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movq %rsi, %rbx
cmpl $3, %edi
je .L4
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq (%rbx), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
leaq .LC1(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
.L5:
movl $0, %eax
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L4:
.cfi_restore_state
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
cmpl $1024, %eax
jg .L8
leaq .LC3(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
jmp .L5
.L8:
leaq .LC2(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
jmp .L5
.cfi_endproc
.LFE3669:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "example1.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
movq %rsi, %rbx
cmpl $3, %edi
jne .LBB0_1
# %bb.5:
movq 16(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl $_ZSt4cout, %edi
cmpl $1025, %eax # imm = 0x401
jl .LBB0_7
# %bb.6:
movl $.L.str.2, %esi
movl $46, %edx
jmp .LBB0_8
.LBB0_1:
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq (%rbx), %rbx
testq %rbx, %rbx
je .LBB0_2
# %bb.3:
movq %rbx, %rdi
callq strlen
movl $_ZSt4cout, %edi
movq %rbx, %rsi
movq %rax, %rdx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
jmp .LBB0_4
.LBB0_7:
movl $.L.str.3, %esi
movl $16, %edx
jmp .LBB0_8
.LBB0_2:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rax), %rdi
movl _ZSt4cout+32(%rax), %esi
orl $1, %esi
callq _ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate
.LBB0_4: # %_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc.exit
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $20, %edx
.LBB0_8:
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
xorl %eax, %eax
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Usage: "
.size .L.str, 8
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " Numblocks BlockDim\n"
.size .L.str.1, 21
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "BlockDim should be less than or equal to 1024\n"
.size .L.str.2, 47
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Lets do this!!!\n"
.size .L.str.3, 17
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | // This example is taken from https://devblogs.nvidia.com/even-easier-introduction-cuda/
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
// get_walltime function for time measurement
double get_walltime_(double* wcTime) {
struct timeval tp;
gettimeofday(&tp, NULL);
*wcTime = (double)(tp.tv_sec + tp.tv_usec/1000000.0);
return 0.0;
}
void get_walltime(double* wcTime) {
get_walltime_(wcTime);
}
// CUDA Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
//printf("Thread ID: %d \t Block ID: %d\n", threadIdx.x, blockIdx.x);
for (int i = index; i < n; i+=stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
double delta, finish, start;
double flops, nd;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
// start time measurement
get_walltime(&start);
// execute the CUDA kernel function
add<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// stop time measurement, why here and not directly after the kernel call?
get_walltime(&finish);
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// calculating time delta and Mflops
delta = (finish - start);
nd = (double) N;
flops = nd/(delta * 1000000.);
std::cout << ">>>>> finish: " << finish << std::endl;
std::cout << ">>>>> delta: " << delta << std::endl;
std::cout << ">>>>> Mflops: " << flops << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
} | code for sm_80
Function : _Z3addiPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x160], PT ; /* 0x0000580003007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe20000000f00 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0080*/ BSSY B0, 0x320 ; /* 0x0000029000007945 */
/* 0x000fe60003800000 */
/*0090*/ IMAD R0, R0, c[0x0][0xc], RZ ; /* 0x0000030000007a24 */
/* 0x000fc800078e02ff */
/*00a0*/ I2F.U32.RP R6, R0 ; /* 0x0000000000067306 */
/* 0x000e220000209000 */
/*00b0*/ IMAD.MOV R9, RZ, RZ, -R0 ; /* 0x000000ffff097224 */
/* 0x000fe200078e0a00 */
/*00c0*/ IADD3 R2, R0.reuse, R3, RZ ; /* 0x0000000300027210 */
/* 0x040fe40007ffe0ff */
/*00d0*/ ISETP.NE.U32.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe40003f45070 */
/*00e0*/ LOP3.LUT R7, RZ, R2, RZ, 0x33, !PT ; /* 0x00000002ff077212 */
/* 0x000fc800078e33ff */
/*00f0*/ IADD3 R7, R7, c[0x0][0x160], R0 ; /* 0x0000580007077a10 */
/* 0x000fe20007ffe000 */
/*0100*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */
/* 0x001e240000001000 */
/*0110*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */
/* 0x001fcc0007ffe0ff */
/*0120*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */
/* 0x000064000021f000 */
/*0130*/ HFMA2.MMA R4, -RZ, RZ, 0, 0 ; /* 0x00000000ff047435 */
/* 0x001fe200000001ff */
/*0140*/ IMAD R9, R9, R5, RZ ; /* 0x0000000509097224 */
/* 0x002fd200078e02ff */
/*0150*/ IMAD.HI.U32 R2, R5, R9, R4 ; /* 0x0000000905027227 */
/* 0x000fcc00078e0004 */
/*0160*/ IMAD.HI.U32 R2, R2, R7, RZ ; /* 0x0000000702027227 */
/* 0x000fc800078e00ff */
/*0170*/ IMAD.MOV R4, RZ, RZ, -R2 ; /* 0x000000ffff047224 */
/* 0x000fc800078e0a02 */
/*0180*/ IMAD R7, R0, R4, R7 ; /* 0x0000000400077224 */
/* 0x000fca00078e0207 */
/*0190*/ ISETP.GE.U32.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f06070 */
/*01a0*/ @P0 IADD3 R7, -R0, R7, RZ ; /* 0x0000000700070210 */
/* 0x000fe40007ffe1ff */
/*01b0*/ @P0 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102020810 */
/* 0x000fe40007ffe0ff */
/*01c0*/ ISETP.GE.U32.AND P1, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f26070 */
/*01d0*/ @P1 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102021810 */
/* 0x000fe40007ffe0ff */
/*01e0*/ @!P2 LOP3.LUT R2, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff02a212 */
/* 0x000fc800078e33ff */
/*01f0*/ IADD3 R4, R2.reuse, 0x1, RZ ; /* 0x0000000102047810 */
/* 0x040fe40007ffe0ff */
/*0200*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f26070 */
/*0210*/ LOP3.LUT P0, R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */
/* 0x000fda000780c0ff */
/*0220*/ @!P0 BRA 0x310 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0230*/ MOV R6, 0x4 ; /* 0x0000000400067802 */
/* 0x000fe20000000f00 */
/*0240*/ IMAD.MOV.U32 R2, RZ, RZ, R4 ; /* 0x000000ffff027224 */
/* 0x000fc800078e0004 */
/*0250*/ IMAD.WIDE R4, R3, R6, c[0x0][0x170] ; /* 0x00005c0003047625 */
/* 0x000fc800078e0206 */
/*0260*/ IMAD.WIDE R6, R3, R6, c[0x0][0x168] ; /* 0x00005a0003067625 */
/* 0x000fc800078e0206 */
/*0270*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x000ea8000c1e1900 */
/*0280*/ LDG.E R9, [R6.64] ; /* 0x0000000406097981 */
/* 0x0000a2000c1e1900 */
/*0290*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */
/* 0x000fe40007ffe0ff */
/*02a0*/ IADD3 R3, R0, R3, RZ ; /* 0x0000000300037210 */
/* 0x000fe40007ffe0ff */
/*02b0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe20003f05270 */
/*02c0*/ IMAD.WIDE R6, R0, 0x4, R6 ; /* 0x0000000400067825 */
/* 0x001fc800078e0206 */
/*02d0*/ FADD R9, R8, R9 ; /* 0x0000000908097221 */
/* 0x004fca0000000000 */
/*02e0*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x0001e4000c101904 */
/*02f0*/ IMAD.WIDE R4, R0, 0x4, R4 ; /* 0x0000000400047825 */
/* 0x001fe200078e0204 */
/*0300*/ @P0 BRA 0x270 ; /* 0xffffff6000000947 */
/* 0x000fea000383ffff */
/*0310*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0320*/ @!P1 EXIT ; /* 0x000000000000994d */
/* 0x000fea0003800000 */
/*0330*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */
/* 0x002fd400000001ff */
/*0340*/ IMAD.WIDE R4, R3, R6, c[0x0][0x168] ; /* 0x00005a0003047625 */
/* 0x000fc800078e0206 */
/*0350*/ IMAD.WIDE R6, R3, R6, c[0x0][0x170] ; /* 0x00005c0003067625 */
/* 0x000fe200078e0206 */
/*0360*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */
/* 0x000ea8000c1e1900 */
/*0370*/ LDG.E R2, [R6.64] ; /* 0x0000000406027981 */
/* 0x000ea2000c1e1900 */
/*0380*/ IMAD.WIDE R10, R0, 0x4, R6 ; /* 0x00000004000a7825 */
/* 0x000fc800078e0206 */
/*0390*/ FADD R17, R2, R9 ; /* 0x0000000902117221 */
/* 0x004fe40000000000 */
/*03a0*/ IMAD.WIDE R8, R0, 0x4, R4 ; /* 0x0000000400087825 */
/* 0x000fc600078e0204 */
/*03b0*/ STG.E [R6.64], R17 ; /* 0x0000001106007986 */
/* 0x0001e8000c101904 */
/*03c0*/ LDG.E R2, [R10.64] ; /* 0x000000040a027981 */
/* 0x000ea8000c1e1900 */
/*03d0*/ LDG.E R13, [R8.64] ; /* 0x00000004080d7981 */
/* 0x000ea2000c1e1900 */
/*03e0*/ IMAD.WIDE R14, R0, 0x4, R10 ; /* 0x00000004000e7825 */
/* 0x000fc800078e020a */
/*03f0*/ FADD R19, R2, R13 ; /* 0x0000000d02137221 */
/* 0x004fe40000000000 */
/*0400*/ IMAD.WIDE R12, R0, 0x4, R8 ; /* 0x00000004000c7825 */
/* 0x000fc600078e0208 */
/*0410*/ STG.E [R10.64], R19 ; /* 0x000000130a007986 */
/* 0x0003e8000c101904 */
/*0420*/ LDG.E R2, [R14.64] ; /* 0x000000040e027981 */
/* 0x000ea8000c1e1900 */
/*0430*/ LDG.E R5, [R12.64] ; /* 0x000000040c057981 */
/* 0x000ea2000c1e1900 */
/*0440*/ IMAD.WIDE R6, R0, 0x4, R14 ; /* 0x0000000400067825 */
/* 0x001fc800078e020e */
/*0450*/ FADD R21, R2, R5 ; /* 0x0000000502157221 */
/* 0x004fe40000000000 */
/*0460*/ IMAD.WIDE R4, R0, 0x4, R12 ; /* 0x0000000400047825 */
/* 0x000fc600078e020c */
/*0470*/ STG.E [R14.64], R21 ; /* 0x000000150e007986 */
/* 0x0003e8000c101904 */
/*0480*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea8000c1e1900 */
/*0490*/ LDG.E R2, [R6.64] ; /* 0x0000000406027981 */
/* 0x000ea2000c1e1900 */
/*04a0*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */
/* 0x000fc80007ffe000 */
/*04b0*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */
/* 0x000fc80007ffe000 */
/*04c0*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x160], PT ; /* 0x0000580003007a0c */
/* 0x000fe20003f06270 */
/*04d0*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*04e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0003ee000c101904 */
/*04f0*/ @!P0 BRA 0x330 ; /* 0xfffffe3000008947 */
/* 0x000fea000383ffff */
/*0500*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0510*/ BRA 0x510; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // This example is taken from https://devblogs.nvidia.com/even-easier-introduction-cuda/
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
// get_walltime function for time measurement
double get_walltime_(double* wcTime) {
struct timeval tp;
gettimeofday(&tp, NULL);
*wcTime = (double)(tp.tv_sec + tp.tv_usec/1000000.0);
return 0.0;
}
void get_walltime(double* wcTime) {
get_walltime_(wcTime);
}
// CUDA Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
//printf("Thread ID: %d \t Block ID: %d\n", threadIdx.x, blockIdx.x);
for (int i = index; i < n; i+=stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y;
double delta, finish, start;
double flops, nd;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
// start time measurement
get_walltime(&start);
// execute the CUDA kernel function
add<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// stop time measurement, why here and not directly after the kernel call?
get_walltime(&finish);
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// calculating time delta and Mflops
delta = (finish - start);
nd = (double) N;
flops = nd/(delta * 1000000.);
std::cout << ">>>>> finish: " << finish << std::endl;
std::cout << ">>>>> delta: " << delta << std::endl;
std::cout << ">>>>> Mflops: " << flops << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
} | .file "tmpxft_0011e84f_00000000-6_add.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z13get_walltime_Pd
.type _Z13get_walltime_Pd, @function
_Z13get_walltime_Pd:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $32, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %rbx
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 8(%rsp), %xmm0
divsd .LC0(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq (%rsp), %xmm1
addsd %xmm1, %xmm0
movsd %xmm0, (%rbx)
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L6
pxor %xmm0, %xmm0
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size _Z13get_walltime_Pd, .-_Z13get_walltime_Pd
.globl _Z12get_walltimePd
.type _Z12get_walltimePd, @function
_Z12get_walltimePd:
.LFB3670:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z13get_walltime_Pd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3670:
.size _Z12get_walltimePd, .-_Z12get_walltimePd
.globl _Z25__device_stub__Z3addiPfS_iPfS_
.type _Z25__device_stub__Z3addiPfS_iPfS_, @function
_Z25__device_stub__Z3addiPfS_iPfS_:
.LFB3696:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L13
.L9:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addiPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L9
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z25__device_stub__Z3addiPfS_iPfS_, .-_Z25__device_stub__Z3addiPfS_iPfS_
.globl _Z3addiPfS_
.type _Z3addiPfS_, @function
_Z3addiPfS_:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z3addiPfS_iPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z3addiPfS_, .-_Z3addiPfS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC7:
.string "Max error: "
.LC9:
.string ">>>>> finish: "
.LC10:
.string ">>>>> delta: "
.LC11:
.string ">>>>> Mflops: "
.text
.globl main
.type main, @function
main:
.LFB3671:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $80, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq 16(%rsp), %rdi
movl $1, %edx
movl $4194304, %esi
call cudaMallocManaged@PLT
leaq 24(%rsp), %rdi
movl $1, %edx
movl $4194304, %esi
call cudaMallocManaged@PLT
movl $0, %eax
movss .LC3(%rip), %xmm1
movss .LC4(%rip), %xmm0
.L18:
movq 16(%rsp), %rdx
movss %xmm1, (%rdx,%rax)
movq 24(%rsp), %rdx
movss %xmm0, (%rdx,%rax)
addq $4, %rax
cmpq $4194304, %rax
jne .L18
leaq 32(%rsp), %rdi
call _Z13get_walltime_Pd
movl $256, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $4096, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 56(%rsp), %rdx
movl $1, %ecx
movq 44(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L19:
call cudaDeviceSynchronize@PLT
leaq 56(%rsp), %rdi
call _Z13get_walltime_Pd
movq 24(%rsp), %rbx
leaq 4194304(%rbx), %rbp
movl $0x00000000, 8(%rsp)
.L20:
movss (%rbx), %xmm0
subss .LC5(%rip), %xmm0
andps .LC6(%rip), %xmm0
movss 8(%rsp), %xmm1
call fmaxf@PLT
movss %xmm0, 8(%rsp)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L20
leaq .LC7(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 8(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 56(%rsp), %r12
movq %r12, %xmm2
subsd 32(%rsp), %xmm2
movsd %xmm2, 8(%rsp)
movapd %xmm2, %xmm3
mulsd .LC0(%rip), %xmm3
movsd .LC8(%rip), %xmm1
divsd %xmm3, %xmm1
movq %xmm1, %rbp
leaq .LC9(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %r12, %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC10(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movsd 8(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC11(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %rbp, %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L26
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movl $1048576, %edi
call _Z25__device_stub__Z3addiPfS_iPfS_
jmp .L19
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3671:
.size main, .-main
.section .rodata.str1.1
.LC12:
.string "_Z3addiPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addiPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1093567616
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC3:
.long 1065353216
.align 4
.LC4:
.long 1073741824
.align 4
.LC5:
.long 1077936128
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC6:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC8:
.long 0
.long 1093664768
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // This example is taken from https://devblogs.nvidia.com/even-easier-introduction-cuda/
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
// get_walltime function for time measurement
double get_walltime_(double* wcTime) {
struct timeval tp;
gettimeofday(&tp, NULL);
*wcTime = (double)(tp.tv_sec + tp.tv_usec/1000000.0);
return 0.0;
}
void get_walltime(double* wcTime) {
get_walltime_(wcTime);
}
// CUDA Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
//printf("Thread ID: %d \t Block ID: %d\n", threadIdx.x, blockIdx.x);
for (int i = index; i < n; i+=stride)
y[i] = x[i] + y[i];
}
int main(void)
{
  // Problem size: 1<<20 = 1,048,576 floats per array.
  int N = 1<<20;
  float *x, *y;
  double delta, finish, start;
  double flops, nd;
  // Allocate unified (managed) memory, accessible from both CPU and GPU.
  // NOTE(review): return codes of the cuda* calls below are never checked.
  cudaMallocManaged(&x, N*sizeof(float));
  cudaMallocManaged(&y, N*sizeof(float));
  // Initialize both input arrays on the host.
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  // Launch configuration: enough 256-thread blocks to cover all N elements.
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;
  // Start wall-clock measurement.
  get_walltime(&start);
  // Launch the element-wise addition kernel (y := x + y).
  add<<<numBlocks, blockSize>>>(N, x, y);
  // The launch is asynchronous: block here so the stop timestamp below
  // includes the kernel's actual execution time, not just the launch.
  cudaDeviceSynchronize();
  get_walltime(&finish);
  // Verify on the host: every element should now be 1.0f + 2.0f = 3.0f.
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;
  // Throughput: N additions / delta seconds, scaled to millions of FLOP/s.
  delta = (finish - start);
  nd = (double) N;
  flops = nd/(delta * 1000000.);
  std::cout << ">>>>> finish: " << finish << std::endl;
  std::cout << ">>>>> delta: " << delta << std::endl;
  std::cout << ">>>>> Mflops: " << flops << std::endl;
  // Release the managed allocations.
  cudaFree(x);
  cudaFree(y);
  return 0;
}
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
// get_walltime function for time measurement
// Write the current wall-clock time (seconds since the Unix epoch, with
// microsecond resolution) into *wcTime. The return value is always 0.0;
// the measurement itself is delivered through the output parameter.
double get_walltime_(double* wcTime) {
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double) now.tv_sec;
    double fraction = now.tv_usec / 1000000.0;
    *wcTime = seconds + fraction;
    return 0.0;
}
// Underscore-free front end for get_walltime_(); the (always-zero) return
// value of the helper is deliberately discarded.
void get_walltime(double* wcTime) {
    (void) get_walltime_(wcTime);
}
// CUDA Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
  // Grid-stride loop: each thread starts at its unique global index and
  // advances by the total number of threads in the grid, so the kernel is
  // correct for any launch configuration, even when n exceeds the thread count.
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  //printf("Thread ID: %d \t Block ID: %d\n", threadIdx.x, blockIdx.x);
  for (int i = index; i < n; i+=stride)
    y[i] = x[i] + y[i];  // element-wise accumulate: y[i] += x[i]
}
int main(void)
{
  // Problem size: 1<<20 = 1,048,576 floats per array.
  int N = 1<<20;
  float *x, *y;
  double delta, finish, start;
  double flops, nd;
  // Allocate unified (managed) memory, accessible from both CPU and GPU.
  // NOTE(review): return codes of the hip* calls below are never checked.
  hipMallocManaged(&x, N*sizeof(float));
  hipMallocManaged(&y, N*sizeof(float));
  // Initialize both input arrays on the host.
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  // Launch configuration: enough 256-thread blocks to cover all N elements.
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;
  // Start wall-clock measurement.
  get_walltime(&start);
  // Launch the element-wise addition kernel (y := x + y).
  add<<<numBlocks, blockSize>>>(N, x, y);
  // The launch is asynchronous: block here so the stop timestamp below
  // includes the kernel's actual execution time, not just the launch.
  hipDeviceSynchronize();
  get_walltime(&finish);
  // Verify on the host: every element should now be 1.0f + 2.0f = 3.0f.
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;
  // Throughput: N additions / delta seconds, scaled to millions of FLOP/s.
  delta = (finish - start);
  nd = (double) N;
  flops = nd/(delta * 1000000.);
  std::cout << ">>>>> finish: " << finish << std::endl;
  std::cout << ">>>>> delta: " << delta << std::endl;
  std::cout << ">>>>> Mflops: " << flops << std::endl;
  // Release the managed allocations.
  hipFree(x);
  hipFree(y);
  return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | // This example is taken from https://devblogs.nvidia.com/even-easier-introduction-cuda/
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
// get_walltime function for time measurement
// Write the current wall-clock time (seconds since the Unix epoch, with
// microsecond resolution) into *wcTime. The return value is always 0.0;
// the measurement itself is delivered through the output parameter.
double get_walltime_(double* wcTime) {
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double) now.tv_sec;
    double fraction = now.tv_usec / 1000000.0;
    *wcTime = seconds + fraction;
    return 0.0;
}
// Underscore-free front end for get_walltime_(); the (always-zero) return
// value of the helper is deliberately discarded.
void get_walltime(double* wcTime) {
    (void) get_walltime_(wcTime);
}
// CUDA Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
  // Grid-stride loop: each thread starts at its unique global index and
  // advances by the total number of threads in the grid, so the kernel is
  // correct for any launch configuration, even when n exceeds the thread count.
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  //printf("Thread ID: %d \t Block ID: %d\n", threadIdx.x, blockIdx.x);
  for (int i = index; i < n; i+=stride)
    y[i] = x[i] + y[i];  // element-wise accumulate: y[i] += x[i]
}
int main(void)
{
  // Problem size: 1<<20 = 1,048,576 floats per array.
  int N = 1<<20;
  float *x, *y;
  double delta, finish, start;
  double flops, nd;
  // Allocate unified (managed) memory, accessible from both CPU and GPU.
  // NOTE(review): return codes of the hip* calls below are never checked.
  hipMallocManaged(&x, N*sizeof(float));
  hipMallocManaged(&y, N*sizeof(float));
  // Initialize both input arrays on the host.
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  // Launch configuration: enough 256-thread blocks to cover all N elements.
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;
  // Start wall-clock measurement.
  get_walltime(&start);
  // Launch the element-wise addition kernel (y := x + y).
  add<<<numBlocks, blockSize>>>(N, x, y);
  // The launch is asynchronous: block here so the stop timestamp below
  // includes the kernel's actual execution time, not just the launch.
  hipDeviceSynchronize();
  get_walltime(&finish);
  // Verify on the host: every element should now be 1.0f + 2.0f = 3.0f.
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;
  // Throughput: N additions / delta seconds, scaled to millions of FLOP/s.
  delta = (finish - start);
  nd = (double) N;
  flops = nd/(delta * 1000000.);
  std::cout << ">>>>> finish: " << finish << std::endl;
  std::cout << ">>>>> delta: " << delta << std::endl;
  std::cout << ">>>>> Mflops: " << flops << std::endl;
  // Release the managed allocations.
  hipFree(x);
  hipFree(y);
  return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addiPfS_
.globl _Z3addiPfS_
.p2align 8
.type _Z3addiPfS_,@function
_Z3addiPfS_:                                    ; add(int n, float *x, float *y), gfx1100 (wave32)
	s_clause 0x1                            ; pair the next two scalar loads
	s_load_b32 s4, s[0:1], 0x24             ; packed group-size word (group_size_x in low 16 bits, per kernarg metadata)
	s_load_b32 s10, s[0:1], 0x0             ; s10 = n (kernarg offset 0)
	s_add_u32 s2, s0, 24                    ; s[2:3] = &kernarg[24] (hidden_block_count_x)
	s_addc_u32 s3, s1, 0
	s_waitcnt lgkmcnt(0)                    ; wait for the scalar loads
	s_and_b32 s8, s4, 0xffff                ; s8 = blockDim.x
	s_mov_b32 s4, exec_lo
	v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1] ; v1 = blockIdx.x * blockDim.x + threadIdx.x
	s_delay_alu instid0(VALU_DEP_1)
	v_cmpx_gt_i32_e64 s10, v1               ; disable lanes with index >= n
	s_cbranch_execz .LBB0_3                 ; no lane in range -> exit
	s_load_b32 s2, s[2:3], 0x0              ; s2 = gridDim.x (hidden block count)
	s_load_b128 s[4:7], s[0:1], 0x8         ; s[4:5] = x, s[6:7] = y (kernarg offsets 8/16)
	v_ashrrev_i32_e32 v2, 31, v1            ; sign-extend the index to 64 bits
	s_mov_b32 s1, 0                         ; accumulated "lane finished" mask
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
	v_lshlrev_b64 v[2:3], 2, v[1:2]         ; byte offset = index * sizeof(float)
	s_waitcnt lgkmcnt(0)
	s_mul_i32 s2, s2, s8                    ; stride = gridDim.x * blockDim.x
	s_ashr_i32 s3, s2, 31
	s_delay_alu instid0(SALU_CYCLE_1)
	s_lshl_b64 s[8:9], s[2:3], 2            ; stride in bytes (64-bit)
	.p2align 6
.LBB0_2:                                        ; grid-stride loop body
	s_delay_alu instid0(VALU_DEP_1)
	v_add_co_u32 v4, vcc_lo, s4, v2         ; v[4:5] = &x[i]
	v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
	v_add_co_u32 v6, vcc_lo, s6, v2         ; v[6:7] = &y[i]
	v_add_co_ci_u32_e32 v7, vcc_lo, s7, v3, vcc_lo
	v_add_co_u32 v2, vcc_lo, v2, s8         ; advance byte offset by stride
	global_load_b32 v0, v[4:5], off         ; load x[i]
	global_load_b32 v4, v[6:7], off         ; load y[i]
	v_add_nc_u32_e32 v1, s2, v1             ; i += stride
	v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo
	s_waitcnt vmcnt(0)                      ; wait for both loads
	v_add_f32_e32 v0, v0, v4                ; sum = x[i] + y[i]
	s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
	v_cmp_le_i32_e64 s0, s10, v1            ; lane is done when i >= n
	global_store_b32 v[6:7], v0, off        ; y[i] = sum
	s_or_b32 s1, s0, s1
	s_and_not1_b32 exec_lo, exec_lo, s1     ; retire finished lanes
	s_cbranch_execnz .LBB0_2                ; loop while any lane remains
.LBB0_3:
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)    ; release VGPRs before terminating
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addiPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addiPfS_, .Lfunc_end0-_Z3addiPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addiPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3addiPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | // This example is taken from https://devblogs.nvidia.com/even-easier-introduction-cuda/
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
// get_walltime function for time measurement
// Write the current wall-clock time (seconds since the Unix epoch, with
// microsecond resolution) into *wcTime. The return value is always 0.0;
// the measurement itself is delivered through the output parameter.
double get_walltime_(double* wcTime) {
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double) now.tv_sec;
    double fraction = now.tv_usec / 1000000.0;
    *wcTime = seconds + fraction;
    return 0.0;
}
// Underscore-free front end for get_walltime_(); the (always-zero) return
// value of the helper is deliberately discarded.
void get_walltime(double* wcTime) {
    (void) get_walltime_(wcTime);
}
// CUDA Kernel function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
  // Grid-stride loop: each thread starts at its unique global index and
  // advances by the total number of threads in the grid, so the kernel is
  // correct for any launch configuration, even when n exceeds the thread count.
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  //printf("Thread ID: %d \t Block ID: %d\n", threadIdx.x, blockIdx.x);
  for (int i = index; i < n; i+=stride)
    y[i] = x[i] + y[i];  // element-wise accumulate: y[i] += x[i]
}
int main(void)
{
  // Problem size: 1<<20 = 1,048,576 floats per array.
  int N = 1<<20;
  float *x, *y;
  double delta, finish, start;
  double flops, nd;
  // Allocate unified (managed) memory, accessible from both CPU and GPU.
  // NOTE(review): return codes of the hip* calls below are never checked.
  hipMallocManaged(&x, N*sizeof(float));
  hipMallocManaged(&y, N*sizeof(float));
  // Initialize both input arrays on the host.
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  // Launch configuration: enough 256-thread blocks to cover all N elements.
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;
  // Start wall-clock measurement.
  get_walltime(&start);
  // Launch the element-wise addition kernel (y := x + y).
  add<<<numBlocks, blockSize>>>(N, x, y);
  // The launch is asynchronous: block here so the stop timestamp below
  // includes the kernel's actual execution time, not just the launch.
  hipDeviceSynchronize();
  get_walltime(&finish);
  // Verify on the host: every element should now be 1.0f + 2.0f = 3.0f.
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;
  // Throughput: N additions / delta seconds, scaled to millions of FLOP/s.
  delta = (finish - start);
  nd = (double) N;
  flops = nd/(delta * 1000000.);
  std::cout << ">>>>> finish: " << finish << std::endl;
  std::cout << ">>>>> delta: " << delta << std::endl;
  std::cout << ">>>>> Mflops: " << flops << std::endl;
  // Release the managed allocations.
  hipFree(x);
  hipFree(y);
  return 0;
}
.file "add.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z13get_walltime_Pd
.LCPI0_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl _Z13get_walltime_Pd
.p2align 4, 0x90
.type _Z13get_walltime_Pd,@function
_Z13get_walltime_Pd:                    # @_Z13get_walltime_Pd
                                        # double get_walltime_(double*), SysV AMD64
                                        # In: rdi = wcTime; Out: xmm0 = 0.0
	.cfi_startproc
# %bb.0:
	pushq	%rbx                    # save callee-saved rbx (also realigns rsp)
	.cfi_def_cfa_offset 16
	subq	$16, %rsp               # stack scratch for struct timeval
	.cfi_def_cfa_offset 32
	.cfi_offset %rbx, -16
	movq	%rdi, %rbx              # keep wcTime live across the call
	movq	%rsp, %rdi              # arg0: &tp
	xorl	%esi, %esi              # arg1: tz = NULL
	callq	gettimeofday
	cvtsi2sdq	(%rsp), %xmm0   # (double) tp.tv_sec
	cvtsi2sdq	8(%rsp), %xmm1  # (double) tp.tv_usec
	divsd	.LCPI0_0(%rip), %xmm1   # tv_usec / 1.0e6
	addsd	%xmm0, %xmm1
	movsd	%xmm1, (%rbx)           # *wcTime = seconds + fraction
	xorpd	%xmm0, %xmm0            # return value 0.0
	addq	$16, %rsp
	.cfi_def_cfa_offset 16
	popq	%rbx
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end0:
	.size	_Z13get_walltime_Pd, .Lfunc_end0-_Z13get_walltime_Pd
	.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z12get_walltimePd
.LCPI1_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl _Z12get_walltimePd
.p2align 4, 0x90
.type _Z12get_walltimePd,@function
_Z12get_walltimePd:                     # @_Z12get_walltimePd
                                        # void get_walltime(double*): the helper is
                                        # inlined here; identical except no return value
	.cfi_startproc
# %bb.0:
	pushq	%rbx                    # save callee-saved rbx (also realigns rsp)
	.cfi_def_cfa_offset 16
	subq	$16, %rsp               # stack scratch for struct timeval
	.cfi_def_cfa_offset 32
	.cfi_offset %rbx, -16
	movq	%rdi, %rbx              # keep wcTime live across the call
	movq	%rsp, %rdi              # arg0: &tp
	xorl	%esi, %esi              # arg1: tz = NULL
	callq	gettimeofday
	cvtsi2sdq	(%rsp), %xmm0   # (double) tp.tv_sec
	cvtsi2sdq	8(%rsp), %xmm1  # (double) tp.tv_usec
	divsd	.LCPI1_0(%rip), %xmm1   # tv_usec / 1.0e6
	addsd	%xmm0, %xmm1
	movsd	%xmm1, (%rbx)           # *wcTime = seconds + fraction
	addq	$16, %rsp
	.cfi_def_cfa_offset 16
	popq	%rbx
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end1:
	.size	_Z12get_walltimePd, .Lfunc_end1-_Z12get_walltimePd
	.cfi_endproc
# -- End function
.globl _Z18__device_stub__addiPfS_ # -- Begin function _Z18__device_stub__addiPfS_
.p2align 4, 0x90
.type _Z18__device_stub__addiPfS_,@function
_Z18__device_stub__addiPfS_: # @_Z18__device_stub__addiPfS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addiPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end2:
.size _Z18__device_stub__addiPfS_, .Lfunc_end2-_Z18__device_stub__addiPfS_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI3_0:
.quad 0x412e848000000000 # double 1.0E+6
.LCPI3_3:
.quad 0x4130000000000000 # double 1048576
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0
.LCPI3_1:
.long 0xc0400000 # float -3
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI3_2:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $168, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
leaq 32(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
movl $1, %edx
callq hipMallocManaged
leaq 16(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
movl $1, %edx
callq hipMallocManaged
movq 32(%rsp), %rax
xorl %ecx, %ecx
movq 16(%rsp), %rdx
.p2align 4, 0x90
.LBB3_1: # =>This Inner Loop Header: Depth=1
movl $1065353216, (%rax,%rcx,4) # imm = 0x3F800000
movl $1073741824, (%rdx,%rcx,4) # imm = 0x40000000
incq %rcx
cmpq $1048576, %rcx # imm = 0x100000
jne .LBB3_1
# %bb.2:
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 48(%rsp), %xmm0
movsd %xmm0, 8(%rsp) # 8-byte Spill
xorps %xmm0, %xmm0
cvtsi2sdq 56(%rsp), %xmm0
divsd .LCPI3_0(%rip), %xmm0
movsd %xmm0, 24(%rsp) # 8-byte Spill
movabsq $4294967552, %rdx # imm = 0x100000100
leaq 3840(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_4
# %bb.3:
movq 32(%rsp), %rax
movq 16(%rsp), %rcx
movl $1048576, 44(%rsp) # imm = 0x100000
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
leaq 44(%rsp), %rax
movq %rax, 48(%rsp)
leaq 136(%rsp), %rax
movq %rax, 56(%rsp)
leaq 128(%rsp), %rax
movq %rax, 64(%rsp)
leaq 112(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 88(%rsp), %rdx
leaq 80(%rsp), %rcx
callq __hipPopCallConfiguration
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
movq 96(%rsp), %rcx
movl 104(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z3addiPfS_, %edi
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_4:
movsd 24(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
addsd 8(%rsp), %xmm0 # 8-byte Folded Reload
movsd %xmm0, 24(%rsp) # 8-byte Spill
callq hipDeviceSynchronize
xorl %ebx, %ebx
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 56(%rsp), %xmm0
movq 48(%rsp), %r14
divsd .LCPI3_0(%rip), %xmm0
movsd %xmm0, 8(%rsp) # 8-byte Spill
xorps %xmm2, %xmm2
movq 16(%rsp), %rax
movss .LCPI3_1(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movaps .LCPI3_2(%rip), %xmm1 # xmm1 = [NaN,NaN,NaN,NaN]
movaps %xmm2, %xmm5
.p2align 4, 0x90
.LBB3_5: # =>This Inner Loop Header: Depth=1
movss (%rax,%rbx,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
addss %xmm0, %xmm3
andps %xmm1, %xmm3
cmpunordss %xmm5, %xmm5
movaps %xmm5, %xmm4
andps %xmm3, %xmm4
maxss %xmm2, %xmm3
andnps %xmm3, %xmm5
orps %xmm4, %xmm5
incq %rbx
movaps %xmm5, %xmm2
cmpq $1048576, %rbx # imm = 0x100000
jne .LBB3_5
# %bb.6:
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $11, %edx
movaps %xmm5, 144(%rsp) # 16-byte Spill
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movaps 144(%rsp), %xmm0 # 16-byte Reload
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB3_23
# %bb.7: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
xorps %xmm0, %xmm0
cvtsi2sd %r14, %xmm0
movsd 8(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
addsd %xmm0, %xmm1
movsd %xmm1, 8(%rsp) # 8-byte Spill
cmpb $0, 56(%rbx)
je .LBB3_9
# %bb.8:
movzbl 67(%rbx), %ecx
jmp .LBB3_10
.LBB3_9:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB3_10: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB3_23
# %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i28
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
subsd 24(%rsp), %xmm0 # 8-byte Folded Reload
movsd %xmm0, 8(%rsp) # 8-byte Spill
cmpb $0, 56(%rbx)
je .LBB3_13
# %bb.12:
movzbl 67(%rbx), %ecx
jmp .LBB3_14
.LBB3_13:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB3_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit31
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $13, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB3_23
# %bb.15: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i33
movsd 8(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
mulsd .LCPI3_0(%rip), %xmm1
movsd .LCPI3_3(%rip), %xmm0 # xmm0 = mem[0],zero
divsd %xmm1, %xmm0
movsd %xmm0, 8(%rsp) # 8-byte Spill
cmpb $0, 56(%rbx)
je .LBB3_17
# %bb.16:
movzbl 67(%rbx), %ecx
jmp .LBB3_18
.LBB3_17:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB3_18: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit36
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB3_23
# %bb.19: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i38
cmpb $0, 56(%rbx)
je .LBB3_21
# %bb.20:
movzbl 67(%rbx), %ecx
jmp .LBB3_22
.LBB3_21:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB3_22: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit41
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 32(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $168, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB3_23:
.cfi_def_cfa_offset 192
callq _ZSt16__throw_bad_castv
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addiPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addiPfS_,@object # @_Z3addiPfS_
.section .rodata,"a",@progbits
.globl _Z3addiPfS_
.p2align 3, 0x0
_Z3addiPfS_:
.quad _Z18__device_stub__addiPfS_
.size _Z3addiPfS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Max error: "
.size .L.str, 12
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz ">>>>> finish: "
.size .L.str.1, 15
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz ">>>>> delta: "
.size .L.str.2, 14
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz ">>>>> Mflops: "
.size .L.str.3, 15
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addiPfS_"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addiPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addiPfS_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3addiPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x160], PT ; /* 0x0000580003007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe20000000f00 */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0080*/ BSSY B0, 0x320 ; /* 0x0000029000007945 */
/* 0x000fe60003800000 */
/*0090*/ IMAD R0, R0, c[0x0][0xc], RZ ; /* 0x0000030000007a24 */
/* 0x000fc800078e02ff */
/*00a0*/ I2F.U32.RP R6, R0 ; /* 0x0000000000067306 */
/* 0x000e220000209000 */
/*00b0*/ IMAD.MOV R9, RZ, RZ, -R0 ; /* 0x000000ffff097224 */
/* 0x000fe200078e0a00 */
/*00c0*/ IADD3 R2, R0.reuse, R3, RZ ; /* 0x0000000300027210 */
/* 0x040fe40007ffe0ff */
/*00d0*/ ISETP.NE.U32.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe40003f45070 */
/*00e0*/ LOP3.LUT R7, RZ, R2, RZ, 0x33, !PT ; /* 0x00000002ff077212 */
/* 0x000fc800078e33ff */
/*00f0*/ IADD3 R7, R7, c[0x0][0x160], R0 ; /* 0x0000580007077a10 */
/* 0x000fe20007ffe000 */
/*0100*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */
/* 0x001e240000001000 */
/*0110*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */
/* 0x001fcc0007ffe0ff */
/*0120*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */
/* 0x000064000021f000 */
/*0130*/ HFMA2.MMA R4, -RZ, RZ, 0, 0 ; /* 0x00000000ff047435 */
/* 0x001fe200000001ff */
/*0140*/ IMAD R9, R9, R5, RZ ; /* 0x0000000509097224 */
/* 0x002fd200078e02ff */
/*0150*/ IMAD.HI.U32 R2, R5, R9, R4 ; /* 0x0000000905027227 */
/* 0x000fcc00078e0004 */
/*0160*/ IMAD.HI.U32 R2, R2, R7, RZ ; /* 0x0000000702027227 */
/* 0x000fc800078e00ff */
/*0170*/ IMAD.MOV R4, RZ, RZ, -R2 ; /* 0x000000ffff047224 */
/* 0x000fc800078e0a02 */
/*0180*/ IMAD R7, R0, R4, R7 ; /* 0x0000000400077224 */
/* 0x000fca00078e0207 */
/*0190*/ ISETP.GE.U32.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f06070 */
/*01a0*/ @P0 IADD3 R7, -R0, R7, RZ ; /* 0x0000000700070210 */
/* 0x000fe40007ffe1ff */
/*01b0*/ @P0 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102020810 */
/* 0x000fe40007ffe0ff */
/*01c0*/ ISETP.GE.U32.AND P1, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f26070 */
/*01d0*/ @P1 IADD3 R2, R2, 0x1, RZ ; /* 0x0000000102021810 */
/* 0x000fe40007ffe0ff */
/*01e0*/ @!P2 LOP3.LUT R2, RZ, R0, RZ, 0x33, !PT ; /* 0x00000000ff02a212 */
/* 0x000fc800078e33ff */
/*01f0*/ IADD3 R4, R2.reuse, 0x1, RZ ; /* 0x0000000102047810 */
/* 0x040fe40007ffe0ff */
/*0200*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f26070 */
/*0210*/ LOP3.LUT P0, R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */
/* 0x000fda000780c0ff */
/*0220*/ @!P0 BRA 0x310 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0230*/ MOV R6, 0x4 ; /* 0x0000000400067802 */
/* 0x000fe20000000f00 */
/*0240*/ IMAD.MOV.U32 R2, RZ, RZ, R4 ; /* 0x000000ffff027224 */
/* 0x000fc800078e0004 */
/*0250*/ IMAD.WIDE R4, R3, R6, c[0x0][0x170] ; /* 0x00005c0003047625 */
/* 0x000fc800078e0206 */
/*0260*/ IMAD.WIDE R6, R3, R6, c[0x0][0x168] ; /* 0x00005a0003067625 */
/* 0x000fc800078e0206 */
/*0270*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x000ea8000c1e1900 */
/*0280*/ LDG.E R9, [R6.64] ; /* 0x0000000406097981 */
/* 0x0000a2000c1e1900 */
/*0290*/ IADD3 R2, R2, -0x1, RZ ; /* 0xffffffff02027810 */
/* 0x000fe40007ffe0ff */
/*02a0*/ IADD3 R3, R0, R3, RZ ; /* 0x0000000300037210 */
/* 0x000fe40007ffe0ff */
/*02b0*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe20003f05270 */
/*02c0*/ IMAD.WIDE R6, R0, 0x4, R6 ; /* 0x0000000400067825 */
/* 0x001fc800078e0206 */
/*02d0*/ FADD R9, R8, R9 ; /* 0x0000000908097221 */
/* 0x004fca0000000000 */
/*02e0*/ STG.E [R4.64], R9 ; /* 0x0000000904007986 */
/* 0x0001e4000c101904 */
/*02f0*/ IMAD.WIDE R4, R0, 0x4, R4 ; /* 0x0000000400047825 */
/* 0x001fe200078e0204 */
/*0300*/ @P0 BRA 0x270 ; /* 0xffffff6000000947 */
/* 0x000fea000383ffff */
/*0310*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0320*/ @!P1 EXIT ; /* 0x000000000000994d */
/* 0x000fea0003800000 */
/*0330*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */
/* 0x002fd400000001ff */
/*0340*/ IMAD.WIDE R4, R3, R6, c[0x0][0x168] ; /* 0x00005a0003047625 */
/* 0x000fc800078e0206 */
/*0350*/ IMAD.WIDE R6, R3, R6, c[0x0][0x170] ; /* 0x00005c0003067625 */
/* 0x000fe200078e0206 */
/*0360*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */
/* 0x000ea8000c1e1900 */
/*0370*/ LDG.E R2, [R6.64] ; /* 0x0000000406027981 */
/* 0x000ea2000c1e1900 */
/*0380*/ IMAD.WIDE R10, R0, 0x4, R6 ; /* 0x00000004000a7825 */
/* 0x000fc800078e0206 */
/*0390*/ FADD R17, R2, R9 ; /* 0x0000000902117221 */
/* 0x004fe40000000000 */
/*03a0*/ IMAD.WIDE R8, R0, 0x4, R4 ; /* 0x0000000400087825 */
/* 0x000fc600078e0204 */
/*03b0*/ STG.E [R6.64], R17 ; /* 0x0000001106007986 */
/* 0x0001e8000c101904 */
/*03c0*/ LDG.E R2, [R10.64] ; /* 0x000000040a027981 */
/* 0x000ea8000c1e1900 */
/*03d0*/ LDG.E R13, [R8.64] ; /* 0x00000004080d7981 */
/* 0x000ea2000c1e1900 */
/*03e0*/ IMAD.WIDE R14, R0, 0x4, R10 ; /* 0x00000004000e7825 */
/* 0x000fc800078e020a */
/*03f0*/ FADD R19, R2, R13 ; /* 0x0000000d02137221 */
/* 0x004fe40000000000 */
/*0400*/ IMAD.WIDE R12, R0, 0x4, R8 ; /* 0x00000004000c7825 */
/* 0x000fc600078e0208 */
/*0410*/ STG.E [R10.64], R19 ; /* 0x000000130a007986 */
/* 0x0003e8000c101904 */
/*0420*/ LDG.E R2, [R14.64] ; /* 0x000000040e027981 */
/* 0x000ea8000c1e1900 */
/*0430*/ LDG.E R5, [R12.64] ; /* 0x000000040c057981 */
/* 0x000ea2000c1e1900 */
/*0440*/ IMAD.WIDE R6, R0, 0x4, R14 ; /* 0x0000000400067825 */
/* 0x001fc800078e020e */
/*0450*/ FADD R21, R2, R5 ; /* 0x0000000502157221 */
/* 0x004fe40000000000 */
/*0460*/ IMAD.WIDE R4, R0, 0x4, R12 ; /* 0x0000000400047825 */
/* 0x000fc600078e020c */
/*0470*/ STG.E [R14.64], R21 ; /* 0x000000150e007986 */
/* 0x0003e8000c101904 */
/*0480*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea8000c1e1900 */
/*0490*/ LDG.E R2, [R6.64] ; /* 0x0000000406027981 */
/* 0x000ea2000c1e1900 */
/*04a0*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */
/* 0x000fc80007ffe000 */
/*04b0*/ IADD3 R3, R0, R3, R0 ; /* 0x0000000300037210 */
/* 0x000fc80007ffe000 */
/*04c0*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x160], PT ; /* 0x0000580003007a0c */
/* 0x000fe20003f06270 */
/*04d0*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*04e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x0003ee000c101904 */
/*04f0*/ @!P0 BRA 0x330 ; /* 0xfffffe3000008947 */
/* 0x000fea000383ffff */
/*0500*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0510*/ BRA 0x510; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addiPfS_
.globl _Z3addiPfS_
.p2align 8
.type _Z3addiPfS_,@function
_Z3addiPfS_:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b32 s10, s[0:1], 0x0
s_add_u32 s2, s0, 24
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s10, v1
s_cbranch_execz .LBB0_3
s_load_b32 s2, s[2:3], 0x0
s_load_b128 s[4:7], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s1, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s8
s_ashr_i32 s3, s2, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[8:9], s[2:3], 2
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1)
v_add_co_u32 v4, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, v2, s8
global_load_b32 v0, v[4:5], off
global_load_b32 v4, v[6:7], off
v_add_nc_u32_e32 v1, s2, v1
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v0, v0, v4
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_cmp_le_i32_e64 s0, s10, v1
global_store_b32 v[6:7], v0, off
s_or_b32 s1, s0, s1
s_and_not1_b32 exec_lo, exec_lo, s1
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addiPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addiPfS_, .Lfunc_end0-_Z3addiPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addiPfS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3addiPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0011e84f_00000000-6_add.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z13get_walltime_Pd
.type _Z13get_walltime_Pd, @function
_Z13get_walltime_Pd:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $32, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %rbx
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 8(%rsp), %xmm0
divsd .LC0(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq (%rsp), %xmm1
addsd %xmm1, %xmm0
movsd %xmm0, (%rbx)
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L6
pxor %xmm0, %xmm0
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size _Z13get_walltime_Pd, .-_Z13get_walltime_Pd
.globl _Z12get_walltimePd
.type _Z12get_walltimePd, @function
_Z12get_walltimePd:
.LFB3670:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z13get_walltime_Pd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3670:
.size _Z12get_walltimePd, .-_Z12get_walltimePd
.globl _Z25__device_stub__Z3addiPfS_iPfS_
.type _Z25__device_stub__Z3addiPfS_iPfS_, @function
_Z25__device_stub__Z3addiPfS_iPfS_:
.LFB3696:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L13
.L9:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addiPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L9
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z25__device_stub__Z3addiPfS_iPfS_, .-_Z25__device_stub__Z3addiPfS_iPfS_
.globl _Z3addiPfS_
.type _Z3addiPfS_, @function
_Z3addiPfS_:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z3addiPfS_iPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z3addiPfS_, .-_Z3addiPfS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC7:
.string "Max error: "
.LC9:
.string ">>>>> finish: "
.LC10:
.string ">>>>> delta: "
.LC11:
.string ">>>>> Mflops: "
.text
.globl main
.type main, @function
main:
.LFB3671:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $80, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq 16(%rsp), %rdi
movl $1, %edx
movl $4194304, %esi
call cudaMallocManaged@PLT
leaq 24(%rsp), %rdi
movl $1, %edx
movl $4194304, %esi
call cudaMallocManaged@PLT
movl $0, %eax
movss .LC3(%rip), %xmm1
movss .LC4(%rip), %xmm0
.L18:
movq 16(%rsp), %rdx
movss %xmm1, (%rdx,%rax)
movq 24(%rsp), %rdx
movss %xmm0, (%rdx,%rax)
addq $4, %rax
cmpq $4194304, %rax
jne .L18
leaq 32(%rsp), %rdi
call _Z13get_walltime_Pd
movl $256, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $4096, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 56(%rsp), %rdx
movl $1, %ecx
movq 44(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L19:
call cudaDeviceSynchronize@PLT
leaq 56(%rsp), %rdi
call _Z13get_walltime_Pd
movq 24(%rsp), %rbx
leaq 4194304(%rbx), %rbp
movl $0x00000000, 8(%rsp)
.L20:
movss (%rbx), %xmm0
subss .LC5(%rip), %xmm0
andps .LC6(%rip), %xmm0
movss 8(%rsp), %xmm1
call fmaxf@PLT
movss %xmm0, 8(%rsp)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L20
leaq .LC7(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 8(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 56(%rsp), %r12
movq %r12, %xmm2
subsd 32(%rsp), %xmm2
movsd %xmm2, 8(%rsp)
movapd %xmm2, %xmm3
mulsd .LC0(%rip), %xmm3
movsd .LC8(%rip), %xmm1
divsd %xmm3, %xmm1
movq %xmm1, %rbp
leaq .LC9(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %r12, %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC10(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movsd 8(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC11(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %rbp, %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L26
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movl $1048576, %edi
call _Z25__device_stub__Z3addiPfS_iPfS_
jmp .L19
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3671:
.size main, .-main
.section .rodata.str1.1
.LC12:
.string "_Z3addiPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addiPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1093567616
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC3:
.long 1065353216
.align 4
.LC4:
.long 1073741824
.align 4
.LC5:
.long 1077936128
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC6:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC8:
.long 0
.long 1093664768
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "add.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z13get_walltime_Pd
.LCPI0_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl _Z13get_walltime_Pd
.p2align 4, 0x90
.type _Z13get_walltime_Pd,@function
_Z13get_walltime_Pd: # @_Z13get_walltime_Pd
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $16, %rsp
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -16
movq %rdi, %rbx
movq %rsp, %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq (%rsp), %xmm0
cvtsi2sdq 8(%rsp), %xmm1
divsd .LCPI0_0(%rip), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, (%rbx)
xorpd %xmm0, %xmm0
addq $16, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z13get_walltime_Pd, .Lfunc_end0-_Z13get_walltime_Pd
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z12get_walltimePd
.LCPI1_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl _Z12get_walltimePd
.p2align 4, 0x90
.type _Z12get_walltimePd,@function
_Z12get_walltimePd: # @_Z12get_walltimePd
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $16, %rsp
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -16
movq %rdi, %rbx
movq %rsp, %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq (%rsp), %xmm0
cvtsi2sdq 8(%rsp), %xmm1
divsd .LCPI1_0(%rip), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, (%rbx)
addq $16, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z12get_walltimePd, .Lfunc_end1-_Z12get_walltimePd
.cfi_endproc
# -- End function
.globl _Z18__device_stub__addiPfS_ # -- Begin function _Z18__device_stub__addiPfS_
.p2align 4, 0x90
.type _Z18__device_stub__addiPfS_,@function
_Z18__device_stub__addiPfS_: # @_Z18__device_stub__addiPfS_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addiPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end2:
.size _Z18__device_stub__addiPfS_, .Lfunc_end2-_Z18__device_stub__addiPfS_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI3_0:
.quad 0x412e848000000000 # double 1.0E+6
.LCPI3_3:
.quad 0x4130000000000000 # double 1048576
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0
.LCPI3_1:
.long 0xc0400000 # float -3
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI3_2:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $168, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
leaq 32(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
movl $1, %edx
callq hipMallocManaged
leaq 16(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
movl $1, %edx
callq hipMallocManaged
movq 32(%rsp), %rax
xorl %ecx, %ecx
movq 16(%rsp), %rdx
.p2align 4, 0x90
.LBB3_1: # =>This Inner Loop Header: Depth=1
movl $1065353216, (%rax,%rcx,4) # imm = 0x3F800000
movl $1073741824, (%rdx,%rcx,4) # imm = 0x40000000
incq %rcx
cmpq $1048576, %rcx # imm = 0x100000
jne .LBB3_1
# %bb.2:
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
cvtsi2sdq 48(%rsp), %xmm0
movsd %xmm0, 8(%rsp) # 8-byte Spill
xorps %xmm0, %xmm0
cvtsi2sdq 56(%rsp), %xmm0
divsd .LCPI3_0(%rip), %xmm0
movsd %xmm0, 24(%rsp) # 8-byte Spill
movabsq $4294967552, %rdx # imm = 0x100000100
leaq 3840(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_4
# %bb.3:
movq 32(%rsp), %rax
movq 16(%rsp), %rcx
movl $1048576, 44(%rsp) # imm = 0x100000
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
leaq 44(%rsp), %rax
movq %rax, 48(%rsp)
leaq 136(%rsp), %rax
movq %rax, 56(%rsp)
leaq 128(%rsp), %rax
movq %rax, 64(%rsp)
leaq 112(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 88(%rsp), %rdx
leaq 80(%rsp), %rcx
callq __hipPopCallConfiguration
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
movq 96(%rsp), %rcx
movl 104(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z3addiPfS_, %edi
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_4:
movsd 24(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
addsd 8(%rsp), %xmm0 # 8-byte Folded Reload
movsd %xmm0, 24(%rsp) # 8-byte Spill
callq hipDeviceSynchronize
xorl %ebx, %ebx
leaq 48(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 56(%rsp), %xmm0
movq 48(%rsp), %r14
divsd .LCPI3_0(%rip), %xmm0
movsd %xmm0, 8(%rsp) # 8-byte Spill
xorps %xmm2, %xmm2
movq 16(%rsp), %rax
movss .LCPI3_1(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movaps .LCPI3_2(%rip), %xmm1 # xmm1 = [NaN,NaN,NaN,NaN]
movaps %xmm2, %xmm5
.p2align 4, 0x90
.LBB3_5: # =>This Inner Loop Header: Depth=1
movss (%rax,%rbx,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
addss %xmm0, %xmm3
andps %xmm1, %xmm3
cmpunordss %xmm5, %xmm5
movaps %xmm5, %xmm4
andps %xmm3, %xmm4
maxss %xmm2, %xmm3
andnps %xmm3, %xmm5
orps %xmm4, %xmm5
incq %rbx
movaps %xmm5, %xmm2
cmpq $1048576, %rbx # imm = 0x100000
jne .LBB3_5
# %bb.6:
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $11, %edx
movaps %xmm5, 144(%rsp) # 16-byte Spill
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movaps 144(%rsp), %xmm0 # 16-byte Reload
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB3_23
# %bb.7: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
xorps %xmm0, %xmm0
cvtsi2sd %r14, %xmm0
movsd 8(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
addsd %xmm0, %xmm1
movsd %xmm1, 8(%rsp) # 8-byte Spill
cmpb $0, 56(%rbx)
je .LBB3_9
# %bb.8:
movzbl 67(%rbx), %ecx
jmp .LBB3_10
.LBB3_9:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB3_10: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB3_23
# %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i28
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
subsd 24(%rsp), %xmm0 # 8-byte Folded Reload
movsd %xmm0, 8(%rsp) # 8-byte Spill
cmpb $0, 56(%rbx)
je .LBB3_13
# %bb.12:
movzbl 67(%rbx), %ecx
jmp .LBB3_14
.LBB3_13:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB3_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit31
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $13, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB3_23
# %bb.15: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i33
movsd 8(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
mulsd .LCPI3_0(%rip), %xmm1
movsd .LCPI3_3(%rip), %xmm0 # xmm0 = mem[0],zero
divsd %xmm1, %xmm0
movsd %xmm0, 8(%rsp) # 8-byte Spill
cmpb $0, 56(%rbx)
je .LBB3_17
# %bb.16:
movzbl 67(%rbx), %ecx
jmp .LBB3_18
.LBB3_17:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB3_18: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit36
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movsd 8(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB3_23
# %bb.19: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i38
cmpb $0, 56(%rbx)
je .LBB3_21
# %bb.20:
movzbl 67(%rbx), %ecx
jmp .LBB3_22
.LBB3_21:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB3_22: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit41
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 32(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $168, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB3_23:
.cfi_def_cfa_offset 192
callq _ZSt16__throw_bad_castv
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addiPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addiPfS_,@object # @_Z3addiPfS_
.section .rodata,"a",@progbits
.globl _Z3addiPfS_
.p2align 3, 0x0
_Z3addiPfS_:
.quad _Z18__device_stub__addiPfS_
.size _Z3addiPfS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Max error: "
.size .L.str, 12
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz ">>>>> finish: "
.size .L.str.1, 15
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz ">>>>> delta: "
.size .L.str.2, 14
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz ">>>>> Mflops: "
.size .L.str.3, 15
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addiPfS_"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addiPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addiPfS_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void profileSubphaseFindAggregates_kernel() {} | code for sm_80
Function : _Z36profileSubphaseFindAggregates_kernelv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void profileSubphaseFindAggregates_kernel() {} | .file "tmpxft_000fe541_00000000-6_profileSubphaseFindAggregates_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z55__device_stub__Z36profileSubphaseFindAggregates_kernelvv
.type _Z55__device_stub__Z36profileSubphaseFindAggregates_kernelvv, @function
_Z55__device_stub__Z36profileSubphaseFindAggregates_kernelvv:
.LFB2051:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z36profileSubphaseFindAggregates_kernelv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z55__device_stub__Z36profileSubphaseFindAggregates_kernelvv, .-_Z55__device_stub__Z36profileSubphaseFindAggregates_kernelvv
.globl _Z36profileSubphaseFindAggregates_kernelv
.type _Z36profileSubphaseFindAggregates_kernelv, @function
_Z36profileSubphaseFindAggregates_kernelv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z55__device_stub__Z36profileSubphaseFindAggregates_kernelvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z36profileSubphaseFindAggregates_kernelv, .-_Z36profileSubphaseFindAggregates_kernelv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z36profileSubphaseFindAggregates_kernelv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z36profileSubphaseFindAggregates_kernelv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void profileSubphaseFindAggregates_kernel() {} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void profileSubphaseFindAggregates_kernel() {} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void profileSubphaseFindAggregates_kernel() {} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z36profileSubphaseFindAggregates_kernelv
.globl _Z36profileSubphaseFindAggregates_kernelv
.p2align 8
.type _Z36profileSubphaseFindAggregates_kernelv,@function
_Z36profileSubphaseFindAggregates_kernelv:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z36profileSubphaseFindAggregates_kernelv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z36profileSubphaseFindAggregates_kernelv, .Lfunc_end0-_Z36profileSubphaseFindAggregates_kernelv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z36profileSubphaseFindAggregates_kernelv
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z36profileSubphaseFindAggregates_kernelv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void profileSubphaseFindAggregates_kernel() {} | .text
.file "profileSubphaseFindAggregates_kernel.hip"
.globl _Z51__device_stub__profileSubphaseFindAggregates_kernelv # -- Begin function _Z51__device_stub__profileSubphaseFindAggregates_kernelv
.p2align 4, 0x90
.type _Z51__device_stub__profileSubphaseFindAggregates_kernelv,@function
_Z51__device_stub__profileSubphaseFindAggregates_kernelv: # @_Z51__device_stub__profileSubphaseFindAggregates_kernelv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z36profileSubphaseFindAggregates_kernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z51__device_stub__profileSubphaseFindAggregates_kernelv, .Lfunc_end0-_Z51__device_stub__profileSubphaseFindAggregates_kernelv
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z36profileSubphaseFindAggregates_kernelv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z36profileSubphaseFindAggregates_kernelv,@object # @_Z36profileSubphaseFindAggregates_kernelv
.section .rodata,"a",@progbits
.globl _Z36profileSubphaseFindAggregates_kernelv
.p2align 3, 0x0
_Z36profileSubphaseFindAggregates_kernelv:
.quad _Z51__device_stub__profileSubphaseFindAggregates_kernelv
.size _Z36profileSubphaseFindAggregates_kernelv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z36profileSubphaseFindAggregates_kernelv"
.size .L__unnamed_1, 42
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z51__device_stub__profileSubphaseFindAggregates_kernelv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z36profileSubphaseFindAggregates_kernelv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z36profileSubphaseFindAggregates_kernelv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z36profileSubphaseFindAggregates_kernelv
.globl _Z36profileSubphaseFindAggregates_kernelv
.p2align 8
.type _Z36profileSubphaseFindAggregates_kernelv,@function
_Z36profileSubphaseFindAggregates_kernelv:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z36profileSubphaseFindAggregates_kernelv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z36profileSubphaseFindAggregates_kernelv, .Lfunc_end0-_Z36profileSubphaseFindAggregates_kernelv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z36profileSubphaseFindAggregates_kernelv
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z36profileSubphaseFindAggregates_kernelv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000fe541_00000000-6_profileSubphaseFindAggregates_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z55__device_stub__Z36profileSubphaseFindAggregates_kernelvv
.type _Z55__device_stub__Z36profileSubphaseFindAggregates_kernelvv, @function
_Z55__device_stub__Z36profileSubphaseFindAggregates_kernelvv:
.LFB2051:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z36profileSubphaseFindAggregates_kernelv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z55__device_stub__Z36profileSubphaseFindAggregates_kernelvv, .-_Z55__device_stub__Z36profileSubphaseFindAggregates_kernelvv
.globl _Z36profileSubphaseFindAggregates_kernelv
.type _Z36profileSubphaseFindAggregates_kernelv, @function
_Z36profileSubphaseFindAggregates_kernelv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z55__device_stub__Z36profileSubphaseFindAggregates_kernelvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z36profileSubphaseFindAggregates_kernelv, .-_Z36profileSubphaseFindAggregates_kernelv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z36profileSubphaseFindAggregates_kernelv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z36profileSubphaseFindAggregates_kernelv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "profileSubphaseFindAggregates_kernel.hip"
.globl _Z51__device_stub__profileSubphaseFindAggregates_kernelv # -- Begin function _Z51__device_stub__profileSubphaseFindAggregates_kernelv
.p2align 4, 0x90
.type _Z51__device_stub__profileSubphaseFindAggregates_kernelv,@function
_Z51__device_stub__profileSubphaseFindAggregates_kernelv: # @_Z51__device_stub__profileSubphaseFindAggregates_kernelv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z36profileSubphaseFindAggregates_kernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z51__device_stub__profileSubphaseFindAggregates_kernelv, .Lfunc_end0-_Z51__device_stub__profileSubphaseFindAggregates_kernelv
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z36profileSubphaseFindAggregates_kernelv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z36profileSubphaseFindAggregates_kernelv,@object # @_Z36profileSubphaseFindAggregates_kernelv
.section .rodata,"a",@progbits
.globl _Z36profileSubphaseFindAggregates_kernelv
.p2align 3, 0x0
_Z36profileSubphaseFindAggregates_kernelv:
.quad _Z51__device_stub__profileSubphaseFindAggregates_kernelv
.size _Z36profileSubphaseFindAggregates_kernelv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z36profileSubphaseFindAggregates_kernelv"
.size .L__unnamed_1, 42
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z51__device_stub__profileSubphaseFindAggregates_kernelv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z36profileSubphaseFindAggregates_kernelv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //pass
//--gridDim=[64,1,1] --blockDim=[128,1,1]
__global__ void kernelAddConstant(int *g_a, const int b)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_a[idx] += b;
} | code for sm_80
Function : _Z17kernelAddConstantPii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0205 */
/*0070*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*0080*/ IADD3 R5, R0, c[0x0][0x168], RZ ; /* 0x00005a0000057a10 */
/* 0x004fca0007ffe0ff */
/*0090*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //pass
//--gridDim=[64,1,1] --blockDim=[128,1,1]
__global__ void kernelAddConstant(int *g_a, const int b)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_a[idx] += b;
} | .file "tmpxft_001b5b5a_00000000-6_cudaOpenMP.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z38__device_stub__Z17kernelAddConstantPiiPii
.type _Z38__device_stub__Z17kernelAddConstantPiiPii, @function
_Z38__device_stub__Z17kernelAddConstantPiiPii:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z17kernelAddConstantPii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z38__device_stub__Z17kernelAddConstantPiiPii, .-_Z38__device_stub__Z17kernelAddConstantPiiPii
.globl _Z17kernelAddConstantPii
.type _Z17kernelAddConstantPii, @function
_Z17kernelAddConstantPii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z17kernelAddConstantPiiPii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z17kernelAddConstantPii, .-_Z17kernelAddConstantPii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z17kernelAddConstantPii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z17kernelAddConstantPii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //pass
//--gridDim=[64,1,1] --blockDim=[128,1,1]
__global__ void kernelAddConstant(int *g_a, const int b)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_a[idx] += b;
} | #include <hip/hip_runtime.h>
//pass
//--gridDim=[64,1,1] --blockDim=[128,1,1]
__global__ void kernelAddConstant(int *g_a, const int b)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_a[idx] += b;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
//pass
//--gridDim=[64,1,1] --blockDim=[128,1,1]
__global__ void kernelAddConstant(int *g_a, const int b)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_a[idx] += b;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z17kernelAddConstantPii
.globl _Z17kernelAddConstantPii
.p2align 8
.type _Z17kernelAddConstantPii,@function
_Z17kernelAddConstantPii:
s_load_b32 s2, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s2, 0xffff
s_load_b64 s[2:3], s[0:1], 0x0
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_load_b32 s0, s[0:1], 0x8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, s0, v2
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17kernelAddConstantPii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z17kernelAddConstantPii, .Lfunc_end0-_Z17kernelAddConstantPii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17kernelAddConstantPii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z17kernelAddConstantPii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
//pass
//--gridDim=[64,1,1] --blockDim=[128,1,1]
__global__ void kernelAddConstant(int *g_a, const int b)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_a[idx] += b;
} | .text
.file "cudaOpenMP.hip"
.globl _Z32__device_stub__kernelAddConstantPii # -- Begin function _Z32__device_stub__kernelAddConstantPii
.p2align 4, 0x90
.type _Z32__device_stub__kernelAddConstantPii,@function
# -----------------------------------------------------------------------------
# Host-side launch stub emitted by hipcc for the __global__ kernel
# _Z17kernelAddConstantPii (kernelAddConstant(int*, int)).  It spills the two
# kernel arguments, builds the pointer array hipLaunchKernel expects, pops the
# <<<...>>> launch configuration pushed by __hipPushCallConfiguration, and
# forwards everything to hipLaunchKernel.
# ABI: SysV AMD64.  In: rdi = g_a, esi = b.  Out: rax = hipLaunchKernel result.
# -----------------------------------------------------------------------------
_Z32__device_stub__kernelAddConstantPii: # @_Z32__device_stub__kernelAddConstantPii
	.cfi_startproc
# %bb.0:
	subq	$88, %rsp                       # 88 + 8 (ret addr) = 96: rsp stays 16-aligned at calls
	.cfi_def_cfa_offset 96
	movq	%rdi, 56(%rsp)                  # spill arg0 (g_a)
	movl	%esi, 4(%rsp)                   # spill arg1 (b)
	leaq	56(%rsp), %rax
	movq	%rax, 64(%rsp)                  # kernel-arg array[0] = &g_a
	leaq	4(%rsp), %rax
	movq	%rax, 72(%rsp)                  # kernel-arg array[1] = &b
	leaq	40(%rsp), %rdi                  # &gridDim   (out)
	leaq	24(%rsp), %rsi                  # &blockDim  (out)
	leaq	16(%rsp), %rdx                  # &sharedMem (out)
	leaq	8(%rsp), %rcx                   # &stream    (out)
	callq	__hipPopCallConfiguration       # recover the <<<...>>> launch config
	movq	40(%rsp), %rsi                  # gridDim.xy
	movl	48(%rsp), %edx                  # gridDim.z
	movq	24(%rsp), %rcx                  # blockDim.xy
	movl	32(%rsp), %r8d                  # blockDim.z
	leaq	64(%rsp), %r9                   # kernel-arg pointer array
	movl	$_Z17kernelAddConstantPii, %edi # host handle identifying the kernel
	pushq	8(%rsp)                         # stack arg: stream
	.cfi_adjust_cfa_offset 8
	pushq	24(%rsp)                        # stack arg: sharedMemBytes (offset shifted by the prior push)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$104, %rsp                      # 88 locals + 2*8 pushed args
	.cfi_adjust_cfa_offset -104
	retq
.Lfunc_end0:
	.size	_Z32__device_stub__kernelAddConstantPii, .Lfunc_end0-_Z32__device_stub__kernelAddConstantPii
	.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
# -----------------------------------------------------------------------------
# Module constructor (referenced from .init_array).  Registers the embedded
# HIP fat binary exactly once — the handle is cached in __hip_gpubin_handle —
# then registers kernel _Z17kernelAddConstantPii under its mangled name and
# schedules __hip_module_dtor to run at process exit via atexit.
# -----------------------------------------------------------------------------
__hip_module_ctor: # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp                       # 32B stack args + align; rsp 16-aligned at calls
	.cfi_def_cfa_offset 48
	cmpq	$0, __hip_gpubin_handle(%rip)   # fat binary already registered?
	jne	.LBB1_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip) # cache the module handle
.LBB1_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)                 # zero the four trailing stack
	movups	%xmm0, (%rsp)                   # arguments of __hipRegisterFunction
	movl	$_Z17kernelAddConstantPii, %esi # host-side kernel handle
	movl	$.L__unnamed_1, %edx            # kernel name string
	movl	$.L__unnamed_1, %ecx            # same string passed twice
	movl	$-1, %r8d                       # thread limit: none
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit # TAILCALL               # atexit(__hip_module_dtor)
.Lfunc_end1:
	.size	__hip_module_ctor, .Lfunc_end1-__hip_module_ctor
	.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# -----------------------------------------------------------------------------
# atexit handler: unregisters the fat binary if __hip_module_ctor registered
# one, then clears the cached handle so the teardown is idempotent.
# -----------------------------------------------------------------------------
__hip_module_dtor: # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi                      # nothing to do if never registered
	je	.LBB2_2
# %bb.1:
	pushq	%rax                            # realign rsp to 16 for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)   # mark as unregistered
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB2_2:
	retq
.Lfunc_end2:
	.size	__hip_module_dtor, .Lfunc_end2-__hip_module_dtor
	.cfi_endproc
# -- End function
.type _Z17kernelAddConstantPii,@object # @_Z17kernelAddConstantPii
.section .rodata,"a",@progbits
.globl _Z17kernelAddConstantPii
.p2align 3, 0x0
_Z17kernelAddConstantPii:
.quad _Z32__device_stub__kernelAddConstantPii
.size _Z17kernelAddConstantPii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z17kernelAddConstantPii"
.size .L__unnamed_1, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z32__device_stub__kernelAddConstantPii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z17kernelAddConstantPii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z17kernelAddConstantPii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0205 */
/*0070*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*0080*/ IADD3 R5, R0, c[0x0][0x168], RZ ; /* 0x00005a0000057a10 */
/* 0x004fca0007ffe0ff */
/*0090*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z17kernelAddConstantPii
.globl _Z17kernelAddConstantPii
.p2align 8
.type _Z17kernelAddConstantPii,@function
; ----------------------------------------------------------------------------
; __global__ void kernelAddConstant(int *g_a, const int b) — gfx1100 ISA.
; Kernarg layout (per the .amdgpu_metadata in this file): +0x00 g_a,
; +0x08 b, +0x1c hidden_group_size_x (low 16 bits of the dword).
; On entry: s[0:1] = kernarg segment pointer, s15 = workgroup id X,
; v0 = workitem id X.  Computes idx = wgid*group_size_x + tid and performs
; g_a[idx] += b.
; ----------------------------------------------------------------------------
_Z17kernelAddConstantPii:
	s_load_b32 s2, s[0:1], 0x1c                       ; dword holding hidden group sizes
	s_waitcnt lgkmcnt(0)
	s_and_b32 s4, s2, 0xffff                          ; group_size_x = low 16 bits
	s_load_b64 s[2:3], s[0:1], 0x0                    ; g_a base pointer
	v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]       ; idx = wgid_x * group_size_x + tid_x
	s_load_b32 s0, s[0:1], 0x8                        ; b
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_ashrrev_i32_e32 v2, 31, v1                      ; sign-extend idx to 64 bits
	v_lshlrev_b64 v[0:1], 2, v[1:2]                   ; byte offset = idx * sizeof(int)
	s_waitcnt lgkmcnt(0)
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
	v_add_co_u32 v0, vcc_lo, s2, v0                   ; 64-bit address = g_a + offset
	v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
	global_load_b32 v2, v[0:1], off                   ; v2 = g_a[idx]
	s_waitcnt vmcnt(0)
	v_add_nc_u32_e32 v2, s0, v2                       ; v2 += b
	global_store_b32 v[0:1], v2, off                  ; g_a[idx] = v2
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)              ; release VGPRs before program end
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17kernelAddConstantPii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z17kernelAddConstantPii, .Lfunc_end0-_Z17kernelAddConstantPii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17kernelAddConstantPii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z17kernelAddConstantPii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001b5b5a_00000000-6_cudaOpenMP.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
# -----------------------------------------------------------------------------
# static void __cudaUnregisterBinaryUtil(void) — atexit handler that
# unregisters the fat-binary handle cached by __sti____cudaRegisterAll.
# -----------------------------------------------------------------------------
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
	.cfi_startproc
	endbr64                                 # CET/IBT landing pad
	subq	$8, %rsp                        # realign rsp to 16 for the call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2029:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z38__device_stub__Z17kernelAddConstantPiiPii
.type _Z38__device_stub__Z17kernelAddConstantPiiPii, @function
# -----------------------------------------------------------------------------
# Host-side launch stub emitted by nvcc for kernelAddConstant(int*, int).
# Spills the two kernel arguments, builds the argument-pointer array, pops the
# <<<...>>> launch configuration, and calls cudaLaunchKernel when the pop
# succeeds.  Locals are guarded by the stack-protector canary (%fs:40).
# ABI: SysV AMD64.  In: rdi = g_a, esi = b.
# -----------------------------------------------------------------------------
_Z38__device_stub__Z17kernelAddConstantPiiPii:
.LFB2051:
	.cfi_startproc
	endbr64                                 # CET/IBT landing pad
	subq	$120, %rsp                      # locals; 120 + 8 (ret addr) keeps 16-alignment
	.cfi_def_cfa_offset 128
	movq	%rdi, 8(%rsp)                   # spill arg0 (g_a)
	movl	%esi, 4(%rsp)                   # spill arg1 (b)
	movq	%fs:40, %rax                    # load stack-protector canary
	movq	%rax, 104(%rsp)
	xorl	%eax, %eax
	leaq	8(%rsp), %rax
	movq	%rax, 80(%rsp)                  # args[0] = &g_a
	leaq	4(%rsp), %rax
	movq	%rax, 88(%rsp)                  # args[1] = &b
	movl	$1, 32(%rsp)                    # gridDim  defaults to (1,1,1)
	movl	$1, 36(%rsp)
	movl	$1, 40(%rsp)
	movl	$1, 44(%rsp)                    # blockDim defaults to (1,1,1)
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	leaq	24(%rsp), %rcx                  # &stream    (out)
	leaq	16(%rsp), %rdx                  # &sharedMem (out)
	leaq	44(%rsp), %rsi                  # &blockDim  (out)
	leaq	32(%rsp), %rdi                  # &gridDim   (out)
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L7                             # 0 => configuration popped, do the launch
.L3:
	movq	104(%rsp), %rax                 # verify stack canary before returning
	subq	%fs:40, %rax
	jne	.L8
	addq	$120, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L7:
	.cfi_restore_state
	pushq	24(%rsp)                        # stack arg: stream
	.cfi_def_cfa_offset 136
	pushq	24(%rsp)                        # stack arg: sharedMem (offsets shifted by prior push)
	.cfi_def_cfa_offset 144
	leaq	96(%rsp), %r9                   # kernel-arg pointer array (80 before the pushes)
	movq	60(%rsp), %rcx                  # blockDim.xy
	movl	68(%rsp), %r8d                  # blockDim.z
	movq	48(%rsp), %rsi                  # gridDim.xy
	movl	56(%rsp), %edx                  # gridDim.z
	leaq	_Z17kernelAddConstantPii(%rip), %rdi  # host handle identifying the kernel
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp                       # drop the two pushed args
	.cfi_def_cfa_offset 128
	jmp	.L3
.L8:
	call	__stack_chk_fail@PLT            # canary mismatch: abort
	.cfi_endproc
.LFE2051:
	.size	_Z38__device_stub__Z17kernelAddConstantPiiPii, .-_Z38__device_stub__Z17kernelAddConstantPiiPii
.globl _Z17kernelAddConstantPii
.type _Z17kernelAddConstantPii, @function
# -----------------------------------------------------------------------------
# Host-side function carrying the kernel's mangled name.  Its address serves
# as the handle registered via __cudaRegisterFunction and passed to
# cudaLaunchKernel; calling it simply forwards to the launch stub.
# -----------------------------------------------------------------------------
_Z17kernelAddConstantPii:
.LFB2052:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                        # realign rsp to 16 for the call
	.cfi_def_cfa_offset 16
	call	_Z38__device_stub__Z17kernelAddConstantPiiPii
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2052:
	.size	_Z17kernelAddConstantPii, .-_Z17kernelAddConstantPii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z17kernelAddConstantPii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
# -----------------------------------------------------------------------------
# static void __sti____cudaRegisterAll(void) — module initializer referenced
# from .init_array.  Registers the embedded fat binary, caches its handle,
# registers kernelAddConstant under its mangled name, finalizes registration,
# and schedules __cudaUnregisterBinaryUtil via atexit.
# -----------------------------------------------------------------------------
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                        # realign rsp to 16
	.cfi_def_cfa_offset 16
	leaq	_ZL15__fatDeviceText(%rip), %rdi
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rdi
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)   # cache the module handle
	pushq	$0                              # four NULL trailing stack args of
	.cfi_def_cfa_offset 24
	pushq	$0                              # __cudaRegisterFunction
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d                       # thread limit: none
	leaq	.LC0(%rip), %rdx                # device function name string
	movq	%rdx, %rcx                      # same string passed twice
	leaq	_Z17kernelAddConstantPii(%rip), %rsi    # host-side handle
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp                       # drop the four pushed args
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT                      # unregister at process exit
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2054:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cudaOpenMP.hip"
.globl _Z32__device_stub__kernelAddConstantPii # -- Begin function _Z32__device_stub__kernelAddConstantPii
.p2align 4, 0x90
.type _Z32__device_stub__kernelAddConstantPii,@function
# -----------------------------------------------------------------------------
# Host-side launch stub emitted by hipcc for the __global__ kernel
# _Z17kernelAddConstantPii (kernelAddConstant(int*, int)).  It spills the two
# kernel arguments, builds the pointer array hipLaunchKernel expects, pops the
# <<<...>>> launch configuration pushed by __hipPushCallConfiguration, and
# forwards everything to hipLaunchKernel.
# ABI: SysV AMD64.  In: rdi = g_a, esi = b.  Out: rax = hipLaunchKernel result.
# -----------------------------------------------------------------------------
_Z32__device_stub__kernelAddConstantPii: # @_Z32__device_stub__kernelAddConstantPii
	.cfi_startproc
# %bb.0:
	subq	$88, %rsp                       # 88 + 8 (ret addr) = 96: rsp stays 16-aligned at calls
	.cfi_def_cfa_offset 96
	movq	%rdi, 56(%rsp)                  # spill arg0 (g_a)
	movl	%esi, 4(%rsp)                   # spill arg1 (b)
	leaq	56(%rsp), %rax
	movq	%rax, 64(%rsp)                  # kernel-arg array[0] = &g_a
	leaq	4(%rsp), %rax
	movq	%rax, 72(%rsp)                  # kernel-arg array[1] = &b
	leaq	40(%rsp), %rdi                  # &gridDim   (out)
	leaq	24(%rsp), %rsi                  # &blockDim  (out)
	leaq	16(%rsp), %rdx                  # &sharedMem (out)
	leaq	8(%rsp), %rcx                   # &stream    (out)
	callq	__hipPopCallConfiguration       # recover the <<<...>>> launch config
	movq	40(%rsp), %rsi                  # gridDim.xy
	movl	48(%rsp), %edx                  # gridDim.z
	movq	24(%rsp), %rcx                  # blockDim.xy
	movl	32(%rsp), %r8d                  # blockDim.z
	leaq	64(%rsp), %r9                   # kernel-arg pointer array
	movl	$_Z17kernelAddConstantPii, %edi # host handle identifying the kernel
	pushq	8(%rsp)                         # stack arg: stream
	.cfi_adjust_cfa_offset 8
	pushq	24(%rsp)                        # stack arg: sharedMemBytes (offset shifted by the prior push)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$104, %rsp                      # 88 locals + 2*8 pushed args
	.cfi_adjust_cfa_offset -104
	retq
.Lfunc_end0:
	.size	_Z32__device_stub__kernelAddConstantPii, .Lfunc_end0-_Z32__device_stub__kernelAddConstantPii
	.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
# -----------------------------------------------------------------------------
# Module constructor (referenced from .init_array).  Registers the embedded
# HIP fat binary exactly once — the handle is cached in __hip_gpubin_handle —
# then registers kernel _Z17kernelAddConstantPii under its mangled name and
# schedules __hip_module_dtor to run at process exit via atexit.
# -----------------------------------------------------------------------------
__hip_module_ctor: # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp                       # 32B stack args + align; rsp 16-aligned at calls
	.cfi_def_cfa_offset 48
	cmpq	$0, __hip_gpubin_handle(%rip)   # fat binary already registered?
	jne	.LBB1_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip) # cache the module handle
.LBB1_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)                 # zero the four trailing stack
	movups	%xmm0, (%rsp)                   # arguments of __hipRegisterFunction
	movl	$_Z17kernelAddConstantPii, %esi # host-side kernel handle
	movl	$.L__unnamed_1, %edx            # kernel name string
	movl	$.L__unnamed_1, %ecx            # same string passed twice
	movl	$-1, %r8d                       # thread limit: none
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit # TAILCALL               # atexit(__hip_module_dtor)
.Lfunc_end1:
	.size	__hip_module_ctor, .Lfunc_end1-__hip_module_ctor
	.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# -----------------------------------------------------------------------------
# atexit handler: unregisters the fat binary if __hip_module_ctor registered
# one, then clears the cached handle so the teardown is idempotent.
# -----------------------------------------------------------------------------
__hip_module_dtor: # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi                      # nothing to do if never registered
	je	.LBB2_2
# %bb.1:
	pushq	%rax                            # realign rsp to 16 for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)   # mark as unregistered
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB2_2:
	retq
.Lfunc_end2:
	.size	__hip_module_dtor, .Lfunc_end2-__hip_module_dtor
	.cfi_endproc
# -- End function
.type _Z17kernelAddConstantPii,@object # @_Z17kernelAddConstantPii
.section .rodata,"a",@progbits
.globl _Z17kernelAddConstantPii
.p2align 3, 0x0
_Z17kernelAddConstantPii:
.quad _Z32__device_stub__kernelAddConstantPii
.size _Z17kernelAddConstantPii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z17kernelAddConstantPii"
.size .L__unnamed_1, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z32__device_stub__kernelAddConstantPii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z17kernelAddConstantPii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /* ================================================================
*
* PyCA Project
*
* Copyright (c) J. Samuel Preston, Linh K. Ha, Sarang C. Joshi. All
* rights reserved. See Copyright.txt or for details.
*
* This software is distributed WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the above copyright notice for more information.
*
* ================================================================ */
#ifndef __REDUCE_STREAM_KERNEL_CU
#define __REDUCE_STREAM_KERNEL_CU
#if __CUDA_ARCH__ != 200
#define NUMBER_OF_CORES (240 * 16)
#else
#define NUMBER_OF_CORES (240)
#endif
#define OPTIMAL_THREAD_SIZE 64
#define MAX_NUMBER_OF_REDUCE_STREAM 8
#define M NUMBER_OF_CORES // Number of core
#define N OPTIMAL_THREAD_SIZE // Best number of thread per block
#define REDUCE_STREAM_SIZE (M * N)
namespace PyCA {

// ---------------------------------------------------------------------------
// Two-stage streamed reductions.  Stage-1 ("L1") kernels fold a length-`size`
// input down to one partial result per block; stage-2 ("L2") device helpers
// fold those M partials into a single value using one N-thread block.
//
// Functor contract (deduced from usage below — confirm against the functor
// definitions elsewhere in the project): a reduction class `op` provides
//   static T identity();  static T op(T, T);  static void iop(T& acc, T v);
// mapping classes (`op1` in compReduce/product) provide a matching `op`.
//
// The M*N stride assumes stage-1 launches of M blocks x N threads and
// stage-2 launches of a single N-thread block — launch code is not in this
// file; verify against the callers.
// ---------------------------------------------------------------------------

// Stage 1: reduce d_i[0..size) with `op`; the block's partial result is
// written to res[blockIdx.x].
template<typename T, class op>
__global__ void reduce_L1_kernel(T *res, const T* d_i, int size) {
    __shared__ T shm[N];                        // one partial per thread
    int idx=blockIdx.x*blockDim.x+threadIdx.x;  // global lane id
    T s=op::identity();
    int j=idx;
    // Two grid-strided elements per iteration; the guard j + M*N < size
    // keeps the second read d_i[j + M*N] in bounds.
    for(; j + M * N <size; j+=M*N*2)
        op::iop(s, op::op(d_i[j], d_i[j + M*N]));
    // Tail: at most one element (index j) can remain for this lane.
    shm[threadIdx.x]= (j < size) ? op::op(s, d_i[j]) : s;
    __syncthreads();
    // Thread 0 serially folds the block's N partials.
    if(threadIdx.x==0) {
        T s=op::identity();
        for(int i=0; i<N; i++)
            op::iop(s, shm[i]);
        res[blockIdx.x]=s;
    }
}

// Stage 1 with a unary pre-map: each element is passed through op1 before
// being folded with op.
template<typename T, class op, class op1>
__global__ void compReduce_L1_kernel(T *res, const T* d_i, int size) {
    __shared__ T shm[N];
    int idx=blockIdx.x*blockDim.x+threadIdx.x;
    T s=op::identity();
    int j=idx;
    for(; j + M * N <size; j+=M*N*2)
        op::iop(s, op::op(op1::op(d_i[j]), op1::op(d_i[j + M*N])));
    shm[threadIdx.x]= (j < size) ? op::op(s, op1::op(d_i[j])) : s;
    __syncthreads();
    if(threadIdx.x==0) {
        T s=op::identity();
        for(int i=0; i<N; i++)
            op::iop(s, shm[i]);
        res[blockIdx.x]=s;
    }
}

// Stage 1 over two inputs: pairs (d_i0[k], d_i1[k]) are combined with the
// binary op1, and the combined values folded with op.
template<typename T, class op, class op1>
__global__ void product_L1_kernel(T *res, const T* d_i0, const T* d_i1, int size) {
    __shared__ T shm[N];
    int idx=blockIdx.x*blockDim.x+threadIdx.x;
    T s=op::identity();
    int j=idx;
    for(; j + M * N <size; j+=M*N*2)
        op::iop(s, op::op(op1::op(d_i0[j],d_i1[j]), op1::op(d_i0[j + M*N], d_i1[j + M*N])));
    shm[threadIdx.x]= (j < size) ? op::op(s, op1::op(d_i0[j],d_i1[j])) : s;
    __syncthreads();
    if(threadIdx.x==0) {
        T s=op::identity();
        for(int i=0; i<N; i++)
            op::iop(s, shm[i]);
        res[blockIdx.x]=s;
    }
}
////////////////////////////////////////////////////////////////////////////////
// Stage 2: one N-thread block folds the M stage-1 partials in d_i[0..M) into
// d_o; with accumulate=true the previous value of d_o is folded in as well.
template<typename T, class op, bool accumulate>
__device__ void reduce_L2_dev(T& d_o, const T* d_i) {
    __shared__ T shm[N];
    uint idx=threadIdx.x;
    T s=op::identity();
#if M%(2*N)==0
    // M is a multiple of 2N: consume two elements per iteration, no tail.
    for(uint j=idx; j<M; j+=N*2)
        op::iop(s, op::op(d_i[j], d_i[j + N]));
#else
    for(uint j=idx; j<M; j+=N)
        op::iop(s, d_i[j]);
#endif
    shm[threadIdx.x]=s;
    __syncthreads();
    if(idx==0) {
        T s=op::identity();
        for(uint i=0; i<N; i++)
            op::iop(s, shm[i]);
        d_o=(accumulate) ? op::op(d_o, s) : s;
    }
}

// In-place stage 2: the result overwrites d_i[0].  Safe because thread 0
// writes it only after all reads have passed the __syncthreads barrier.
template<typename T, class op>
__global__ void reduce_L2_kernel(T *d_i) {
    reduce_L2_dev<T, op, false>(d_i[0], d_i);
}

// Out-of-place stage 2 with optional accumulation into d_o[0].
template<typename T, class op, bool accumulate>
__global__ void reduce_L2_kernel(T* d_o, const T* d_i) {
    reduce_L2_dev<T, op, accumulate>(d_o[0], d_i);
}
////////////////////////////////////////////////////////////////////////////////
// Stage 1 computing two different reductions (op and op1) of the same input
// in a single pass (e.g. min and max together).
template<typename T, class op, class op1>
__device__ void bireduce_L1_dev(T& res, T& res1, const T* d_i, int size) {
    __shared__ T shm[N];
    __shared__ T shm1[N];
    int idx=blockIdx.x*blockDim.x+threadIdx.x;
    T s =op::identity();
    T s1=op1::identity();
    int j=idx;
    for(; j + M * N <size; j+=M*N*2) {
        op::iop(s, op::op(d_i[j], d_i[j + M*N]));
        op1::iop(s1, op1::op(d_i[j], d_i[j + M*N]));
    }
    shm[threadIdx.x]  = (j < size) ? op::op(s, d_i[j]) : s;
    shm1[threadIdx.x] = (j < size) ? op1::op(s1, d_i[j]) : s1;
    __syncthreads();
#if 1
    // Thread 0 folds both partial arrays.
    if(threadIdx.x==0) {
        T s =op::identity();
        T s1=op1::identity();
        for(int i=0; i<N; i++){
            op::iop(s, shm[i]);
            op1::iop(s1, shm1[i]);
        }
        res  = s;
        res1 = s1;
    }
#else
    // Disabled alternative: one thread per reduction (kept for reference).
    if(threadIdx.x==0) {
        T s =op::identity();
        for(int i=0; i<N; i++)
            op::iop(s, shm[i]);
        res = s;
    }
    if(threadIdx.x==1) {
        T s1 =op1::identity();
        for(int i=0; i<N; i++)
            op1::iop(s1, shm1[i]);
        res1 = s1;
    }
#endif
}

// Stage-1 wrapper: each block writes its two partials to res/res1[blockIdx.x].
template<typename T, class op, class op1>
__global__ void bireduce_L1_kernel(T *res, T* res1, const T* d_i, int size) {
    int blockId = blockIdx.x;
    bireduce_L1_dev<T, op, op1>
        (res[blockId], res1[blockId], d_i, size);
}

// Stage 2 for two independent partial streams d_i / d_i1, producing d_o and
// d_o1 (optionally accumulating into their previous values).
template<typename T, class op, class op1, bool accumulate>
__device__ void reduce_ip2_op2_L2_dev(T& d_o, T& d_o1, const T* d_i, const T* d_i1) {
    __shared__ T shm[N];
    __shared__ T shm1[N];
    uint idx=threadIdx.x;
    T s =op::identity();
    T s1=op1::identity();
#if M%(2*N)==0
    for(uint j=idx; j<M; j+=N*2){
        op::iop(s, op::op(d_i[j], d_i[j + N]));
        op1::iop(s1, op1::op(d_i1[j], d_i1[j + N]));
    }
#else
    for(uint j=idx; j<M; j+=N){
        op::iop(s, d_i[j]);
        op1::iop(s1, d_i1[j]);
    }
#endif
    shm[threadIdx.x]=s;
    shm1[threadIdx.x]=s1;
    __syncthreads();
    if(idx==0) {
        T s=op::identity();
        T s1=op1::identity();
        for(uint i=0; i<N; i++){
            op::iop(s, shm[i]);
            op1::iop(s1, shm1[i]);
        }
        d_o  = accumulate ? op::op(d_o, s) : s;
        d_o1 = accumulate ? op1::op(d_o1, s1) : s1;
    }
}

// Stage-2 wrapper writing the two results to separate outputs d_o[0], d_o1[0].
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip2_op2_L2_kernel(T* d_o, T* d_o1, const T* d_i, const T* d_i1) {
    reduce_ip2_op2_L2_dev<T, op, op1, accumulate>(d_o[0], d_o1[0], d_i, d_i1);
}

// Stage-2 wrapper writing both results into adjacent slots d_o[0], d_o[1].
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip2_op2_L2_kernel(T* d_o, T *d_i, const T* d_i1) {
    reduce_ip2_op2_L2_dev<T, op, op1, accumulate>(d_o[0], d_o[1], d_i, d_i1);
}
////////////////////////////////////////////////////////////////////////////////
// Stage 1 computing three different reductions (op/op1/op2) of the same input
// in a single pass (e.g. sum, min and max together).
template<typename T, class op, class op1, class op2>
__device__ void trireduce_L1_dev(T& res, T& res1, T& res2, const T* d_i, int size) {
    __shared__ T shm[N];
    __shared__ T shm1[N];
    __shared__ T shm2[N];
    int idx=blockIdx.x*blockDim.x+threadIdx.x;
    T s =op::identity();
    T s1=op1::identity();
    T s2=op2::identity();
    int j=idx;
    for(; j + M * N <size; j+=M*N*2) {
        op::iop(s, op::op(d_i[j], d_i[j + M*N]));
        op1::iop(s1, op1::op(d_i[j], d_i[j + M*N]));
        op2::iop(s2, op2::op(d_i[j], d_i[j + M*N]));
    }
    shm[threadIdx.x]  = (j < size) ? op::op(s, d_i[j]) : s;
    shm1[threadIdx.x] = (j < size) ? op1::op(s1, d_i[j]) : s1;
    shm2[threadIdx.x] = (j < size) ? op2::op(s2, d_i[j]) : s2;
    __syncthreads();
#if 1
    // Thread 0 folds all three partial arrays.
    if(threadIdx.x==0) {
        T s =op::identity();
        T s1=op1::identity();
        T s2=op2::identity();
        for(int i=0; i<N; i++){
            op::iop(s, shm[i]);
            op1::iop(s1, shm1[i]);
            op2::iop(s2, shm2[i]);
        }
        res  = s;
        res1 = s1;
        res2 = s2;
    }
#else
    // Disabled alternative: one thread per reduction (kept for reference).
    if(threadIdx.x==0) {
        T s =op::identity();
        for(int i=0; i<N; i++)
            op::iop(s, shm[i]);
        res = s;
    }
    if(threadIdx.x==1) {
        T s1 =op1::identity();
        for(int i=0; i<N; i++)
            op1::iop(s1, shm1[i]);
        res1 = s1;
    }
    if(threadIdx.x==2) {
        T s2 =op2::identity();
        for(int i=0; i<N; i++)
            op2::iop(s2, shm2[i]);
        res2 = s2;
    }
#endif
}

// NOTE(review): trireduce_L1_dev takes only four template parameters
// (T, op, op1, op2); passing `accumulate` as a fifth argument below will not
// compile if this kernel is ever instantiated — verify intended signature.
template<typename T, class op, class op1, class op2, bool accumulate>
__global__ void trireduce_L1_kernel(T *res, T* res1, T* res2,
                                    const T* d_i, int size) {
    int blockId = blockIdx.x;
    trireduce_L1_dev<T, op, op1, op2, accumulate>
        (res[blockId], res1[blockId], res2[blockId], d_i, size);
}

// Stage 2 for three independent partial streams d_i/d_i1/d_i2, producing
// d_o/d_o1/d_o2 (optionally accumulating into their previous values).
template<typename T, class op, class op1, class op2, bool accumulate>
__device__ void reduce_ip3_op3_L2_dev(T& d_o, T& d_o1, T& d_o2,
                                      const T* d_i, const T* d_i1, const T* d_i2) {
    __shared__ T shm[N];
    __shared__ T shm1[N];
    __shared__ T shm2[N];
    uint idx=threadIdx.x;
    T s =op::identity();
    T s1=op1::identity();
    T s2=op2::identity();
#if M%(2*N)==0
    for(uint j=idx; j<M; j+=N*2){
        op::iop(s, op::op(d_i[j], d_i[j + N]));
        op1::iop(s1, op1::op(d_i1[j], d_i1[j + N]));
        op2::iop(s2, op2::op(d_i2[j], d_i2[j + N]));
    }
#else
    for(uint j=idx; j<M; j+=N){
        op::iop(s, d_i[j]);
        op1::iop(s1, d_i1[j]);
        op2::iop(s2, d_i2[j]);
    }
#endif
    shm[threadIdx.x]=s;
    shm1[threadIdx.x]=s1;
    shm2[threadIdx.x]=s2;
    __syncthreads();
    if(idx==0) {
        T s=op::identity();
        T s1=op1::identity();
        T s2=op2::identity();
        for(uint i=0; i<N; i++){
            op::iop(s, shm[i]);
            op1::iop(s1, shm1[i]);
            op2::iop(s2, shm2[i]);
        }
        d_o  = accumulate ? op::op(d_o, s) : s;
        d_o1 = accumulate ? op1::op(d_o1, s1) : s1;
        d_o2 = accumulate ? op2::op(d_o2, s2) : s2;
    }
}

// NOTE(review): reduce_ip3_op3_L2_dev expects five template parameters
// (T, op, op1, op2, bool accumulate); the calls below pass the bool
// `accumulate` where the class `op2` is expected and omit the fifth
// parameter, so these kernels will not compile if instantiated — an `op2`
// template parameter is probably missing.  Verify against callers.
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip3_op3_L3_kernel(T* d_o, T* d_o1, T* d_o2,
                                         const T* d_i, const T* d_i1, const T* d_i2) {
    reduce_ip3_op3_L2_dev<T, op, op1, accumulate>(d_o[0], d_o1[0], d_o2[0],
                                                  d_i, d_i1, d_i2);
}

// Same as above but with the three results in adjacent slots d_o[0..2].
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip3_op3_L3_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2) {
    reduce_ip3_op3_L2_dev<T, op, op1, accumulate>(d_o[0], d_o[1], d_o[2],
                                                  d_i, d_i1, d_i2);
}
#endif  // closes the __REDUCE_STREAM_KERNEL_CU include guard opened above
} // end namespace PyCA | code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* ================================================================
*
* PyCA Project
*
* Copyright (c) J. Samuel Preston, Linh K. Ha, Sarang C. Joshi. All
* rights reserved. See Copyright.txt or for details.
*
* This software is distributed WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the above copyright notice for more information.
*
* ================================================================ */
#ifndef __REDUCE_STREAM_KERNEL_CU
#define __REDUCE_STREAM_KERNEL_CU
#if __CUDA_ARCH__ != 200
#define NUMBER_OF_CORES (240 * 16)
#else
#define NUMBER_OF_CORES (240)
#endif
#define OPTIMAL_THREAD_SIZE 64
#define MAX_NUMBER_OF_REDUCE_STREAM 8
#define M NUMBER_OF_CORES // Number of core
#define N OPTIMAL_THREAD_SIZE // Best number of thread per block
#define REDUCE_STREAM_SIZE (M * N)
namespace PyCA {
template<typename T, class op>
__global__ void reduce_L1_kernel(T *res, const T* d_i, int size) {
__shared__ T shm[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s=op::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2)
op::iop(s, op::op(d_i[j], d_i[j + M*N]));
shm[threadIdx.x]= (j < size) ? op::op(s, d_i[j]) : s;
__syncthreads();
if(threadIdx.x==0) {
T s=op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res[blockIdx.x]=s;
}
}
template<typename T, class op, class op1>
__global__ void compReduce_L1_kernel(T *res, const T* d_i, int size) {
__shared__ T shm[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s=op::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2)
op::iop(s, op::op(op1::op(d_i[j]), op1::op(d_i[j + M*N])));
shm[threadIdx.x]= (j < size) ? op::op(s, op1::op(d_i[j])) : s;
__syncthreads();
if(threadIdx.x==0) {
T s=op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res[blockIdx.x]=s;
}
}
template<typename T, class op, class op1>
__global__ void product_L1_kernel(T *res, const T* d_i0, const T* d_i1, int size) {
__shared__ T shm[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s=op::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2)
op::iop(s, op::op(op1::op(d_i0[j],d_i1[j]), op1::op(d_i0[j + M*N], d_i1[j + M*N])));
shm[threadIdx.x]= (j < size) ? op::op(s, op1::op(d_i0[j],d_i1[j])) : s;
__syncthreads();
if(threadIdx.x==0) {
T s=op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res[blockIdx.x]=s;
}
}
////////////////////////////////////////////////////////////////////////////////
template<typename T, class op, bool accumulate>
__device__ void reduce_L2_dev(T& d_o, const T* d_i) {
__shared__ T shm[N];
uint idx=threadIdx.x;
T s=op::identity();
#if M%(2*N)==0
for(uint j=idx; j<M; j+=N*2)
op::iop(s, op::op(d_i[j], d_i[j + N]));
#else
for(uint j=idx; j<M; j+=N)
op::iop(s, d_i[j]);
#endif
shm[threadIdx.x]=s;
__syncthreads();
if(idx==0) {
T s=op::identity();
for(uint i=0; i<N; i++)
op::iop(s, shm[i]);
d_o=(accumulate) ? op::op(d_o, s) : s;
}
}
template<typename T, class op>
__global__ void reduce_L2_kernel(T *d_i) {
reduce_L2_dev<T, op, false>(d_i[0], d_i);
}
template<typename T, class op, bool accumulate>
__global__ void reduce_L2_kernel(T* d_o, const T* d_i) {
reduce_L2_dev<T, op, accumulate>(d_o[0], d_i);
}
////////////////////////////////////////////////////////////////////////////////
template<typename T, class op, class op1>
__device__ void bireduce_L1_dev(T& res, T& res1, const T* d_i, int size) {
__shared__ T shm[N];
__shared__ T shm1[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s =op::identity();
T s1=op1::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2) {
op::iop(s, op::op(d_i[j], d_i[j + M*N]));
op1::iop(s1, op1::op(d_i[j], d_i[j + M*N]));
}
shm[threadIdx.x] = (j < size) ? op::op(s, d_i[j]) : s;
shm1[threadIdx.x] = (j < size) ? op1::op(s1, d_i[j]) : s1;
__syncthreads();
#if 1
if(threadIdx.x==0) {
T s =op::identity();
T s1=op1::identity();
for(int i=0; i<N; i++){
op::iop(s, shm[i]);
op1::iop(s1, shm1[i]);
}
res = s;
res1 = s1;
}
#else
if(threadIdx.x==0) {
T s =op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res = s;
}
if(threadIdx.x==1) {
T s1 =op1::identity();
for(int i=0; i<N; i++)
op1::iop(s1, shm1[i]);
res1 = s1;
}
#endif
}
template<typename T, class op, class op1>
__global__ void bireduce_L1_kernel(T *res, T* res1, const T* d_i, int size) {
int blockId = blockIdx.x;
bireduce_L1_dev<T, op, op1>
(res[blockId], res1[blockId], d_i, size);
}
template<typename T, class op, class op1, bool accumulate>
__device__ void reduce_ip2_op2_L2_dev(T& d_o, T& d_o1, const T* d_i, const T* d_i1) {
__shared__ T shm[N];
__shared__ T shm1[N];
uint idx=threadIdx.x;
T s =op::identity();
T s1=op1::identity();
#if M%(2*N)==0
for(uint j=idx; j<M; j+=N*2){
op::iop(s, op::op(d_i[j], d_i[j + N]));
op1::iop(s1, op1::op(d_i1[j], d_i1[j + N]));
}
#else
for(uint j=idx; j<M; j+=N){
op::iop(s, d_i[j]);
op1::iop(s1, d_i1[j]);
}
#endif
shm[threadIdx.x]=s;
shm1[threadIdx.x]=s1;
__syncthreads();
if(idx==0) {
T s=op::identity();
T s1=op1::identity();
for(uint i=0; i<N; i++){
op::iop(s, shm[i]);
op1::iop(s1, shm1[i]);
}
d_o = accumulate ? op::op(d_o, s) : s;
d_o1 = accumulate ? op1::op(d_o1, s1) : s1;
}
}
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip2_op2_L2_kernel(T* d_o, T* d_o1, const T* d_i, const T* d_i1) {
reduce_ip2_op2_L2_dev<T, op, op1, accumulate>(d_o[0], d_o1[0], d_i, d_i1);
}
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip2_op2_L2_kernel(T* d_o, T *d_i, const T* d_i1) {
reduce_ip2_op2_L2_dev<T, op, op1, accumulate>(d_o[0], d_o[1], d_i, d_i1);
}
////////////////////////////////////////////////////////////////////////////////
template<typename T, class op, class op1, class op2>
__device__ void trireduce_L1_dev(T& res, T& res1, T& res2, const T* d_i, int size) {
__shared__ T shm[N];
__shared__ T shm1[N];
__shared__ T shm2[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s =op::identity();
T s1=op1::identity();
T s2=op2::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2) {
op::iop(s, op::op(d_i[j], d_i[j + M*N]));
op1::iop(s1, op1::op(d_i[j], d_i[j + M*N]));
op2::iop(s2, op2::op(d_i[j], d_i[j + M*N]));
}
shm[threadIdx.x] = (j < size) ? op::op(s, d_i[j]) : s;
shm1[threadIdx.x] = (j < size) ? op1::op(s1, d_i[j]) : s1;
shm2[threadIdx.x] = (j < size) ? op2::op(s2, d_i[j]) : s2;
__syncthreads();
#if 1
if(threadIdx.x==0) {
T s =op::identity();
T s1=op1::identity();
T s2=op2::identity();
for(int i=0; i<N; i++){
op::iop(s, shm[i]);
op1::iop(s1, shm1[i]);
op2::iop(s2, shm2[i]);
}
res = s;
res1 = s1;
res2 = s2;
}
#else
if(threadIdx.x==0) {
T s =op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res = s;
}
if(threadIdx.x==1) {
T s1 =op1::identity();
for(int i=0; i<N; i++)
op1::iop(s1, shm1[i]);
res1 = s1;
}
if(threadIdx.x==2) {
T s2 =op2::identity();
for(int i=0; i<N; i++)
op2::iop(s2, shm2[i]);
res2 = s2;
}
#endif
}
template<typename T, class op, class op1, class op2, bool accumulate>
__global__ void trireduce_L1_kernel(T *res, T* res1, T* res2,
const T* d_i, int size) {
int blockId = blockIdx.x;
trireduce_L1_dev<T, op, op1, op2, accumulate>
(res[blockId], res1[blockId], res2[blockId], d_i, size);
}
template<typename T, class op, class op1, class op2, bool accumulate>
__device__ void reduce_ip3_op3_L2_dev(T& d_o, T& d_o1, T& d_o2,
const T* d_i, const T* d_i1, const T* d_i2) {
__shared__ T shm[N];
__shared__ T shm1[N];
__shared__ T shm2[N];
uint idx=threadIdx.x;
T s =op::identity();
T s1=op1::identity();
T s2=op2::identity();
#if M%(2*N)==0
for(uint j=idx; j<M; j+=N*2){
op::iop(s, op::op(d_i[j], d_i[j + N]));
op1::iop(s1, op1::op(d_i1[j], d_i1[j + N]));
op2::iop(s2, op2::op(d_i2[j], d_i2[j + N]));
}
#else
for(uint j=idx; j<M; j+=N){
op::iop(s, d_i[j]);
op1::iop(s1, d_i1[j]);
op2::iop(s2, d_i2[j]);
}
#endif
shm[threadIdx.x]=s;
shm1[threadIdx.x]=s1;
shm2[threadIdx.x]=s2;
__syncthreads();
if(idx==0) {
T s=op::identity();
T s1=op1::identity();
T s2=op2::identity();
for(uint i=0; i<N; i++){
op::iop(s, shm[i]);
op1::iop(s1, shm1[i]);
op2::iop(s2, shm2[i]);
}
d_o = accumulate ? op::op(d_o, s) : s;
d_o1 = accumulate ? op1::op(d_o1, s1) : s1;
d_o2 = accumulate ? op2::op(d_o2, s2) : s2;
}
}
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip3_op3_L3_kernel(T* d_o, T* d_o1, T* d_o2,
const T* d_i, const T* d_i1, const T* d_i2) {
reduce_ip3_op3_L2_dev<T, op, op1, accumulate>(d_o[0], d_o1[0], d_o2[0],
d_i, d_i1, d_i2);
}
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip3_op3_L3_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2) {
reduce_ip3_op3_L2_dev<T, op, op1, accumulate>(d_o[0], d_o[1], d_o[2],
d_i, d_i1, d_i2);
}
#endif
} // end namespace PyCA | .file "tmpxft_00011158_00000000-6_ReduceStreamKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2056:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2079:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2079:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* ================================================================
*
* PyCA Project
*
* Copyright (c) J. Samuel Preston, Linh K. Ha, Sarang C. Joshi. All
* rights reserved. See Copyright.txt or for details.
*
* This software is distributed WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the above copyright notice for more information.
*
* ================================================================ */
#ifndef __REDUCE_STREAM_KERNEL_CU
#define __REDUCE_STREAM_KERNEL_CU
#if __CUDA_ARCH__ != 200
#define NUMBER_OF_CORES (240 * 16)
#else
#define NUMBER_OF_CORES (240)
#endif
#define OPTIMAL_THREAD_SIZE 64
#define MAX_NUMBER_OF_REDUCE_STREAM 8
#define M NUMBER_OF_CORES // Number of core
#define N OPTIMAL_THREAD_SIZE // Best number of thread per block
#define REDUCE_STREAM_SIZE (M * N)
namespace PyCA {
template<typename T, class op>
__global__ void reduce_L1_kernel(T *res, const T* d_i, int size) {
__shared__ T shm[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s=op::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2)
op::iop(s, op::op(d_i[j], d_i[j + M*N]));
shm[threadIdx.x]= (j < size) ? op::op(s, d_i[j]) : s;
__syncthreads();
if(threadIdx.x==0) {
T s=op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res[blockIdx.x]=s;
}
}
template<typename T, class op, class op1>
__global__ void compReduce_L1_kernel(T *res, const T* d_i, int size) {
__shared__ T shm[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s=op::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2)
op::iop(s, op::op(op1::op(d_i[j]), op1::op(d_i[j + M*N])));
shm[threadIdx.x]= (j < size) ? op::op(s, op1::op(d_i[j])) : s;
__syncthreads();
if(threadIdx.x==0) {
T s=op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res[blockIdx.x]=s;
}
}
template<typename T, class op, class op1>
__global__ void product_L1_kernel(T *res, const T* d_i0, const T* d_i1, int size) {
__shared__ T shm[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s=op::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2)
op::iop(s, op::op(op1::op(d_i0[j],d_i1[j]), op1::op(d_i0[j + M*N], d_i1[j + M*N])));
shm[threadIdx.x]= (j < size) ? op::op(s, op1::op(d_i0[j],d_i1[j])) : s;
__syncthreads();
if(threadIdx.x==0) {
T s=op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res[blockIdx.x]=s;
}
}
////////////////////////////////////////////////////////////////////////////////
template<typename T, class op, bool accumulate>
__device__ void reduce_L2_dev(T& d_o, const T* d_i) {
__shared__ T shm[N];
uint idx=threadIdx.x;
T s=op::identity();
#if M%(2*N)==0
for(uint j=idx; j<M; j+=N*2)
op::iop(s, op::op(d_i[j], d_i[j + N]));
#else
for(uint j=idx; j<M; j+=N)
op::iop(s, d_i[j]);
#endif
shm[threadIdx.x]=s;
__syncthreads();
if(idx==0) {
T s=op::identity();
for(uint i=0; i<N; i++)
op::iop(s, shm[i]);
d_o=(accumulate) ? op::op(d_o, s) : s;
}
}
template<typename T, class op>
__global__ void reduce_L2_kernel(T *d_i) {
reduce_L2_dev<T, op, false>(d_i[0], d_i);
}
template<typename T, class op, bool accumulate>
__global__ void reduce_L2_kernel(T* d_o, const T* d_i) {
reduce_L2_dev<T, op, accumulate>(d_o[0], d_i);
}
////////////////////////////////////////////////////////////////////////////////
template<typename T, class op, class op1>
__device__ void bireduce_L1_dev(T& res, T& res1, const T* d_i, int size) {
__shared__ T shm[N];
__shared__ T shm1[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s =op::identity();
T s1=op1::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2) {
op::iop(s, op::op(d_i[j], d_i[j + M*N]));
op1::iop(s1, op1::op(d_i[j], d_i[j + M*N]));
}
shm[threadIdx.x] = (j < size) ? op::op(s, d_i[j]) : s;
shm1[threadIdx.x] = (j < size) ? op1::op(s1, d_i[j]) : s1;
__syncthreads();
#if 1
if(threadIdx.x==0) {
T s =op::identity();
T s1=op1::identity();
for(int i=0; i<N; i++){
op::iop(s, shm[i]);
op1::iop(s1, shm1[i]);
}
res = s;
res1 = s1;
}
#else
if(threadIdx.x==0) {
T s =op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res = s;
}
if(threadIdx.x==1) {
T s1 =op1::identity();
for(int i=0; i<N; i++)
op1::iop(s1, shm1[i]);
res1 = s1;
}
#endif
}
template<typename T, class op, class op1>
__global__ void bireduce_L1_kernel(T *res, T* res1, const T* d_i, int size) {
int blockId = blockIdx.x;
bireduce_L1_dev<T, op, op1>
(res[blockId], res1[blockId], d_i, size);
}
template<typename T, class op, class op1, bool accumulate>
__device__ void reduce_ip2_op2_L2_dev(T& d_o, T& d_o1, const T* d_i, const T* d_i1) {
__shared__ T shm[N];
__shared__ T shm1[N];
uint idx=threadIdx.x;
T s =op::identity();
T s1=op1::identity();
#if M%(2*N)==0
for(uint j=idx; j<M; j+=N*2){
op::iop(s, op::op(d_i[j], d_i[j + N]));
op1::iop(s1, op1::op(d_i1[j], d_i1[j + N]));
}
#else
for(uint j=idx; j<M; j+=N){
op::iop(s, d_i[j]);
op1::iop(s1, d_i1[j]);
}
#endif
shm[threadIdx.x]=s;
shm1[threadIdx.x]=s1;
__syncthreads();
if(idx==0) {
T s=op::identity();
T s1=op1::identity();
for(uint i=0; i<N; i++){
op::iop(s, shm[i]);
op1::iop(s1, shm1[i]);
}
d_o = accumulate ? op::op(d_o, s) : s;
d_o1 = accumulate ? op1::op(d_o1, s1) : s1;
}
}
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip2_op2_L2_kernel(T* d_o, T* d_o1, const T* d_i, const T* d_i1) {
reduce_ip2_op2_L2_dev<T, op, op1, accumulate>(d_o[0], d_o1[0], d_i, d_i1);
}
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip2_op2_L2_kernel(T* d_o, T *d_i, const T* d_i1) {
reduce_ip2_op2_L2_dev<T, op, op1, accumulate>(d_o[0], d_o[1], d_i, d_i1);
}
////////////////////////////////////////////////////////////////////////////////
template<typename T, class op, class op1, class op2>
__device__ void trireduce_L1_dev(T& res, T& res1, T& res2, const T* d_i, int size) {
__shared__ T shm[N];
__shared__ T shm1[N];
__shared__ T shm2[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s =op::identity();
T s1=op1::identity();
T s2=op2::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2) {
op::iop(s, op::op(d_i[j], d_i[j + M*N]));
op1::iop(s1, op1::op(d_i[j], d_i[j + M*N]));
op2::iop(s2, op2::op(d_i[j], d_i[j + M*N]));
}
shm[threadIdx.x] = (j < size) ? op::op(s, d_i[j]) : s;
shm1[threadIdx.x] = (j < size) ? op1::op(s1, d_i[j]) : s1;
shm2[threadIdx.x] = (j < size) ? op2::op(s2, d_i[j]) : s2;
__syncthreads();
#if 1
if(threadIdx.x==0) {
T s =op::identity();
T s1=op1::identity();
T s2=op2::identity();
for(int i=0; i<N; i++){
op::iop(s, shm[i]);
op1::iop(s1, shm1[i]);
op2::iop(s2, shm2[i]);
}
res = s;
res1 = s1;
res2 = s2;
}
#else
if(threadIdx.x==0) {
T s =op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res = s;
}
if(threadIdx.x==1) {
T s1 =op1::identity();
for(int i=0; i<N; i++)
op1::iop(s1, shm1[i]);
res1 = s1;
}
if(threadIdx.x==2) {
T s2 =op2::identity();
for(int i=0; i<N; i++)
op2::iop(s2, shm2[i]);
res2 = s2;
}
#endif
}
template<typename T, class op, class op1, class op2, bool accumulate>
__global__ void trireduce_L1_kernel(T *res, T* res1, T* res2,
const T* d_i, int size) {
int blockId = blockIdx.x;
trireduce_L1_dev<T, op, op1, op2, accumulate>
(res[blockId], res1[blockId], res2[blockId], d_i, size);
}
template<typename T, class op, class op1, class op2, bool accumulate>
__device__ void reduce_ip3_op3_L2_dev(T& d_o, T& d_o1, T& d_o2,
const T* d_i, const T* d_i1, const T* d_i2) {
__shared__ T shm[N];
__shared__ T shm1[N];
__shared__ T shm2[N];
uint idx=threadIdx.x;
T s =op::identity();
T s1=op1::identity();
T s2=op2::identity();
#if M%(2*N)==0
for(uint j=idx; j<M; j+=N*2){
op::iop(s, op::op(d_i[j], d_i[j + N]));
op1::iop(s1, op1::op(d_i1[j], d_i1[j + N]));
op2::iop(s2, op2::op(d_i2[j], d_i2[j + N]));
}
#else
for(uint j=idx; j<M; j+=N){
op::iop(s, d_i[j]);
op1::iop(s1, d_i1[j]);
op2::iop(s2, d_i2[j]);
}
#endif
shm[threadIdx.x]=s;
shm1[threadIdx.x]=s1;
shm2[threadIdx.x]=s2;
__syncthreads();
if(idx==0) {
T s=op::identity();
T s1=op1::identity();
T s2=op2::identity();
for(uint i=0; i<N; i++){
op::iop(s, shm[i]);
op1::iop(s1, shm1[i]);
op2::iop(s2, shm2[i]);
}
d_o = accumulate ? op::op(d_o, s) : s;
d_o1 = accumulate ? op1::op(d_o1, s1) : s1;
d_o2 = accumulate ? op2::op(d_o2, s2) : s2;
}
}
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip3_op3_L3_kernel(T* d_o, T* d_o1, T* d_o2,
const T* d_i, const T* d_i1, const T* d_i2) {
reduce_ip3_op3_L2_dev<T, op, op1, accumulate>(d_o[0], d_o1[0], d_o2[0],
d_i, d_i1, d_i2);
}
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip3_op3_L3_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2) {
reduce_ip3_op3_L2_dev<T, op, op1, accumulate>(d_o[0], d_o[1], d_o[2],
d_i, d_i1, d_i2);
}
#endif
} // end namespace PyCA | #include <hip/hip_runtime.h>
/* ================================================================
*
* PyCA Project
*
* Copyright (c) J. Samuel Preston, Linh K. Ha, Sarang C. Joshi. All
* rights reserved. See Copyright.txt or for details.
*
* This software is distributed WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the above copyright notice for more information.
*
* ================================================================ */
#ifndef __REDUCE_STREAM_KERNEL_CU
#define __REDUCE_STREAM_KERNEL_CU
#if __CUDA_ARCH__ != 200
#define NUMBER_OF_CORES (240 * 16)
#else
#define NUMBER_OF_CORES (240)
#endif
#define OPTIMAL_THREAD_SIZE 64
#define MAX_NUMBER_OF_REDUCE_STREAM 8
#define M NUMBER_OF_CORES // Number of core
#define N OPTIMAL_THREAD_SIZE // Best number of thread per block
#define REDUCE_STREAM_SIZE (M * N)
namespace PyCA {
template<typename T, class op>
__global__ void reduce_L1_kernel(T *res, const T* d_i, int size) {
__shared__ T shm[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s=op::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2)
op::iop(s, op::op(d_i[j], d_i[j + M*N]));
shm[threadIdx.x]= (j < size) ? op::op(s, d_i[j]) : s;
__syncthreads();
if(threadIdx.x==0) {
T s=op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res[blockIdx.x]=s;
}
}
template<typename T, class op, class op1>
__global__ void compReduce_L1_kernel(T *res, const T* d_i, int size) {
__shared__ T shm[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s=op::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2)
op::iop(s, op::op(op1::op(d_i[j]), op1::op(d_i[j + M*N])));
shm[threadIdx.x]= (j < size) ? op::op(s, op1::op(d_i[j])) : s;
__syncthreads();
if(threadIdx.x==0) {
T s=op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res[blockIdx.x]=s;
}
}
template<typename T, class op, class op1>
__global__ void product_L1_kernel(T *res, const T* d_i0, const T* d_i1, int size) {
__shared__ T shm[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s=op::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2)
op::iop(s, op::op(op1::op(d_i0[j],d_i1[j]), op1::op(d_i0[j + M*N], d_i1[j + M*N])));
shm[threadIdx.x]= (j < size) ? op::op(s, op1::op(d_i0[j],d_i1[j])) : s;
__syncthreads();
if(threadIdx.x==0) {
T s=op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res[blockIdx.x]=s;
}
}
////////////////////////////////////////////////////////////////////////////////
template<typename T, class op, bool accumulate>
__device__ void reduce_L2_dev(T& d_o, const T* d_i) {
__shared__ T shm[N];
uint idx=threadIdx.x;
T s=op::identity();
#if M%(2*N)==0
for(uint j=idx; j<M; j+=N*2)
op::iop(s, op::op(d_i[j], d_i[j + N]));
#else
for(uint j=idx; j<M; j+=N)
op::iop(s, d_i[j]);
#endif
shm[threadIdx.x]=s;
__syncthreads();
if(idx==0) {
T s=op::identity();
for(uint i=0; i<N; i++)
op::iop(s, shm[i]);
d_o=(accumulate) ? op::op(d_o, s) : s;
}
}
template<typename T, class op>
__global__ void reduce_L2_kernel(T *d_i) {
reduce_L2_dev<T, op, false>(d_i[0], d_i);
}
template<typename T, class op, bool accumulate>
__global__ void reduce_L2_kernel(T* d_o, const T* d_i) {
reduce_L2_dev<T, op, accumulate>(d_o[0], d_i);
}
////////////////////////////////////////////////////////////////////////////////
template<typename T, class op, class op1>
__device__ void bireduce_L1_dev(T& res, T& res1, const T* d_i, int size) {
__shared__ T shm[N];
__shared__ T shm1[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s =op::identity();
T s1=op1::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2) {
op::iop(s, op::op(d_i[j], d_i[j + M*N]));
op1::iop(s1, op1::op(d_i[j], d_i[j + M*N]));
}
shm[threadIdx.x] = (j < size) ? op::op(s, d_i[j]) : s;
shm1[threadIdx.x] = (j < size) ? op1::op(s1, d_i[j]) : s1;
__syncthreads();
#if 1
if(threadIdx.x==0) {
T s =op::identity();
T s1=op1::identity();
for(int i=0; i<N; i++){
op::iop(s, shm[i]);
op1::iop(s1, shm1[i]);
}
res = s;
res1 = s1;
}
#else
if(threadIdx.x==0) {
T s =op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res = s;
}
if(threadIdx.x==1) {
T s1 =op1::identity();
for(int i=0; i<N; i++)
op1::iop(s1, shm1[i]);
res1 = s1;
}
#endif
}
template<typename T, class op, class op1>
__global__ void bireduce_L1_kernel(T *res, T* res1, const T* d_i, int size) {
int blockId = blockIdx.x;
bireduce_L1_dev<T, op, op1>
(res[blockId], res1[blockId], d_i, size);
}
template<typename T, class op, class op1, bool accumulate>
__device__ void reduce_ip2_op2_L2_dev(T& d_o, T& d_o1, const T* d_i, const T* d_i1) {
__shared__ T shm[N];
__shared__ T shm1[N];
uint idx=threadIdx.x;
T s =op::identity();
T s1=op1::identity();
#if M%(2*N)==0
for(uint j=idx; j<M; j+=N*2){
op::iop(s, op::op(d_i[j], d_i[j + N]));
op1::iop(s1, op1::op(d_i1[j], d_i1[j + N]));
}
#else
for(uint j=idx; j<M; j+=N){
op::iop(s, d_i[j]);
op1::iop(s1, d_i1[j]);
}
#endif
shm[threadIdx.x]=s;
shm1[threadIdx.x]=s1;
__syncthreads();
if(idx==0) {
T s=op::identity();
T s1=op1::identity();
for(uint i=0; i<N; i++){
op::iop(s, shm[i]);
op1::iop(s1, shm1[i]);
}
d_o = accumulate ? op::op(d_o, s) : s;
d_o1 = accumulate ? op1::op(d_o1, s1) : s1;
}
}
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip2_op2_L2_kernel(T* d_o, T* d_o1, const T* d_i, const T* d_i1) {
reduce_ip2_op2_L2_dev<T, op, op1, accumulate>(d_o[0], d_o1[0], d_i, d_i1);
}
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip2_op2_L2_kernel(T* d_o, T *d_i, const T* d_i1) {
reduce_ip2_op2_L2_dev<T, op, op1, accumulate>(d_o[0], d_o[1], d_i, d_i1);
}
////////////////////////////////////////////////////////////////////////////////
template<typename T, class op, class op1, class op2>
__device__ void trireduce_L1_dev(T& res, T& res1, T& res2, const T* d_i, int size) {
__shared__ T shm[N];
__shared__ T shm1[N];
__shared__ T shm2[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s =op::identity();
T s1=op1::identity();
T s2=op2::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2) {
op::iop(s, op::op(d_i[j], d_i[j + M*N]));
op1::iop(s1, op1::op(d_i[j], d_i[j + M*N]));
op2::iop(s2, op2::op(d_i[j], d_i[j + M*N]));
}
shm[threadIdx.x] = (j < size) ? op::op(s, d_i[j]) : s;
shm1[threadIdx.x] = (j < size) ? op1::op(s1, d_i[j]) : s1;
shm2[threadIdx.x] = (j < size) ? op2::op(s2, d_i[j]) : s2;
__syncthreads();
#if 1
if(threadIdx.x==0) {
T s =op::identity();
T s1=op1::identity();
T s2=op2::identity();
for(int i=0; i<N; i++){
op::iop(s, shm[i]);
op1::iop(s1, shm1[i]);
op2::iop(s2, shm2[i]);
}
res = s;
res1 = s1;
res2 = s2;
}
#else
if(threadIdx.x==0) {
T s =op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res = s;
}
if(threadIdx.x==1) {
T s1 =op1::identity();
for(int i=0; i<N; i++)
op1::iop(s1, shm1[i]);
res1 = s1;
}
if(threadIdx.x==2) {
T s2 =op2::identity();
for(int i=0; i<N; i++)
op2::iop(s2, shm2[i]);
res2 = s2;
}
#endif
}
template<typename T, class op, class op1, class op2, bool accumulate>
__global__ void trireduce_L1_kernel(T *res, T* res1, T* res2,
const T* d_i, int size) {
int blockId = blockIdx.x;
trireduce_L1_dev<T, op, op1, op2, accumulate>
(res[blockId], res1[blockId], res2[blockId], d_i, size);
}
template<typename T, class op, class op1, class op2, bool accumulate>
__device__ void reduce_ip3_op3_L2_dev(T& d_o, T& d_o1, T& d_o2,
const T* d_i, const T* d_i1, const T* d_i2) {
__shared__ T shm[N];
__shared__ T shm1[N];
__shared__ T shm2[N];
uint idx=threadIdx.x;
T s =op::identity();
T s1=op1::identity();
T s2=op2::identity();
#if M%(2*N)==0
for(uint j=idx; j<M; j+=N*2){
op::iop(s, op::op(d_i[j], d_i[j + N]));
op1::iop(s1, op1::op(d_i1[j], d_i1[j + N]));
op2::iop(s2, op2::op(d_i2[j], d_i2[j + N]));
}
#else
for(uint j=idx; j<M; j+=N){
op::iop(s, d_i[j]);
op1::iop(s1, d_i1[j]);
op2::iop(s2, d_i2[j]);
}
#endif
shm[threadIdx.x]=s;
shm1[threadIdx.x]=s1;
shm2[threadIdx.x]=s2;
__syncthreads();
if(idx==0) {
T s=op::identity();
T s1=op1::identity();
T s2=op2::identity();
for(uint i=0; i<N; i++){
op::iop(s, shm[i]);
op1::iop(s1, shm1[i]);
op2::iop(s2, shm2[i]);
}
d_o = accumulate ? op::op(d_o, s) : s;
d_o1 = accumulate ? op1::op(d_o1, s1) : s1;
d_o2 = accumulate ? op2::op(d_o2, s2) : s2;
}
}
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip3_op3_L3_kernel(T* d_o, T* d_o1, T* d_o2,
const T* d_i, const T* d_i1, const T* d_i2) {
reduce_ip3_op3_L2_dev<T, op, op1, accumulate>(d_o[0], d_o1[0], d_o2[0],
d_i, d_i1, d_i2);
}
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip3_op3_L3_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2) {
reduce_ip3_op3_L2_dev<T, op, op1, accumulate>(d_o[0], d_o[1], d_o[2],
d_i, d_i1, d_i2);
}
#endif
} // end namespace PyCA |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
/* ================================================================
*
* PyCA Project
*
* Copyright (c) J. Samuel Preston, Linh K. Ha, Sarang C. Joshi. All
* rights reserved. See Copyright.txt or for details.
*
* This software is distributed WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the above copyright notice for more information.
*
* ================================================================ */
#ifndef __REDUCE_STREAM_KERNEL_CU
#define __REDUCE_STREAM_KERNEL_CU
#if __CUDA_ARCH__ != 200
#define NUMBER_OF_CORES (240 * 16)
#else
#define NUMBER_OF_CORES (240)
#endif
#define OPTIMAL_THREAD_SIZE 64
#define MAX_NUMBER_OF_REDUCE_STREAM 8
#define M NUMBER_OF_CORES // Number of core
#define N OPTIMAL_THREAD_SIZE // Best number of thread per block
#define REDUCE_STREAM_SIZE (M * N)
namespace PyCA {
template<typename T, class op>
__global__ void reduce_L1_kernel(T *res, const T* d_i, int size) {
__shared__ T shm[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s=op::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2)
op::iop(s, op::op(d_i[j], d_i[j + M*N]));
shm[threadIdx.x]= (j < size) ? op::op(s, d_i[j]) : s;
__syncthreads();
if(threadIdx.x==0) {
T s=op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res[blockIdx.x]=s;
}
}
template<typename T, class op, class op1>
__global__ void compReduce_L1_kernel(T *res, const T* d_i, int size) {
__shared__ T shm[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s=op::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2)
op::iop(s, op::op(op1::op(d_i[j]), op1::op(d_i[j + M*N])));
shm[threadIdx.x]= (j < size) ? op::op(s, op1::op(d_i[j])) : s;
__syncthreads();
if(threadIdx.x==0) {
T s=op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res[blockIdx.x]=s;
}
}
template<typename T, class op, class op1>
__global__ void product_L1_kernel(T *res, const T* d_i0, const T* d_i1, int size) {
__shared__ T shm[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s=op::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2)
op::iop(s, op::op(op1::op(d_i0[j],d_i1[j]), op1::op(d_i0[j + M*N], d_i1[j + M*N])));
shm[threadIdx.x]= (j < size) ? op::op(s, op1::op(d_i0[j],d_i1[j])) : s;
__syncthreads();
if(threadIdx.x==0) {
T s=op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res[blockIdx.x]=s;
}
}
////////////////////////////////////////////////////////////////////////////////
template<typename T, class op, bool accumulate>
__device__ void reduce_L2_dev(T& d_o, const T* d_i) {
__shared__ T shm[N];
uint idx=threadIdx.x;
T s=op::identity();
#if M%(2*N)==0
for(uint j=idx; j<M; j+=N*2)
op::iop(s, op::op(d_i[j], d_i[j + N]));
#else
for(uint j=idx; j<M; j+=N)
op::iop(s, d_i[j]);
#endif
shm[threadIdx.x]=s;
__syncthreads();
if(idx==0) {
T s=op::identity();
for(uint i=0; i<N; i++)
op::iop(s, shm[i]);
d_o=(accumulate) ? op::op(d_o, s) : s;
}
}
template<typename T, class op>
__global__ void reduce_L2_kernel(T *d_i) {
reduce_L2_dev<T, op, false>(d_i[0], d_i);
}
template<typename T, class op, bool accumulate>
__global__ void reduce_L2_kernel(T* d_o, const T* d_i) {
reduce_L2_dev<T, op, accumulate>(d_o[0], d_i);
}
////////////////////////////////////////////////////////////////////////////////
template<typename T, class op, class op1>
__device__ void bireduce_L1_dev(T& res, T& res1, const T* d_i, int size) {
__shared__ T shm[N];
__shared__ T shm1[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s =op::identity();
T s1=op1::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2) {
op::iop(s, op::op(d_i[j], d_i[j + M*N]));
op1::iop(s1, op1::op(d_i[j], d_i[j + M*N]));
}
shm[threadIdx.x] = (j < size) ? op::op(s, d_i[j]) : s;
shm1[threadIdx.x] = (j < size) ? op1::op(s1, d_i[j]) : s1;
__syncthreads();
#if 1
if(threadIdx.x==0) {
T s =op::identity();
T s1=op1::identity();
for(int i=0; i<N; i++){
op::iop(s, shm[i]);
op1::iop(s1, shm1[i]);
}
res = s;
res1 = s1;
}
#else
if(threadIdx.x==0) {
T s =op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res = s;
}
if(threadIdx.x==1) {
T s1 =op1::identity();
for(int i=0; i<N; i++)
op1::iop(s1, shm1[i]);
res1 = s1;
}
#endif
}
template<typename T, class op, class op1>
__global__ void bireduce_L1_kernel(T *res, T* res1, const T* d_i, int size) {
int blockId = blockIdx.x;
bireduce_L1_dev<T, op, op1>
(res[blockId], res1[blockId], d_i, size);
}
template<typename T, class op, class op1, bool accumulate>
__device__ void reduce_ip2_op2_L2_dev(T& d_o, T& d_o1, const T* d_i, const T* d_i1) {
__shared__ T shm[N];
__shared__ T shm1[N];
uint idx=threadIdx.x;
T s =op::identity();
T s1=op1::identity();
#if M%(2*N)==0
for(uint j=idx; j<M; j+=N*2){
op::iop(s, op::op(d_i[j], d_i[j + N]));
op1::iop(s1, op1::op(d_i1[j], d_i1[j + N]));
}
#else
for(uint j=idx; j<M; j+=N){
op::iop(s, d_i[j]);
op1::iop(s1, d_i1[j]);
}
#endif
shm[threadIdx.x]=s;
shm1[threadIdx.x]=s1;
__syncthreads();
if(idx==0) {
T s=op::identity();
T s1=op1::identity();
for(uint i=0; i<N; i++){
op::iop(s, shm[i]);
op1::iop(s1, shm1[i]);
}
d_o = accumulate ? op::op(d_o, s) : s;
d_o1 = accumulate ? op1::op(d_o1, s1) : s1;
}
}
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip2_op2_L2_kernel(T* d_o, T* d_o1, const T* d_i, const T* d_i1) {
reduce_ip2_op2_L2_dev<T, op, op1, accumulate>(d_o[0], d_o1[0], d_i, d_i1);
}
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip2_op2_L2_kernel(T* d_o, T *d_i, const T* d_i1) {
reduce_ip2_op2_L2_dev<T, op, op1, accumulate>(d_o[0], d_o[1], d_i, d_i1);
}
////////////////////////////////////////////////////////////////////////////////
template<typename T, class op, class op1, class op2>
__device__ void trireduce_L1_dev(T& res, T& res1, T& res2, const T* d_i, int size) {
__shared__ T shm[N];
__shared__ T shm1[N];
__shared__ T shm2[N];
int idx=blockIdx.x*blockDim.x+threadIdx.x;
T s =op::identity();
T s1=op1::identity();
T s2=op2::identity();
int j=idx;
for(; j + M * N <size; j+=M*N*2) {
op::iop(s, op::op(d_i[j], d_i[j + M*N]));
op1::iop(s1, op1::op(d_i[j], d_i[j + M*N]));
op2::iop(s2, op2::op(d_i[j], d_i[j + M*N]));
}
shm[threadIdx.x] = (j < size) ? op::op(s, d_i[j]) : s;
shm1[threadIdx.x] = (j < size) ? op1::op(s1, d_i[j]) : s1;
shm2[threadIdx.x] = (j < size) ? op2::op(s2, d_i[j]) : s2;
__syncthreads();
#if 1
if(threadIdx.x==0) {
T s =op::identity();
T s1=op1::identity();
T s2=op2::identity();
for(int i=0; i<N; i++){
op::iop(s, shm[i]);
op1::iop(s1, shm1[i]);
op2::iop(s2, shm2[i]);
}
res = s;
res1 = s1;
res2 = s2;
}
#else
if(threadIdx.x==0) {
T s =op::identity();
for(int i=0; i<N; i++)
op::iop(s, shm[i]);
res = s;
}
if(threadIdx.x==1) {
T s1 =op1::identity();
for(int i=0; i<N; i++)
op1::iop(s1, shm1[i]);
res1 = s1;
}
if(threadIdx.x==2) {
T s2 =op2::identity();
for(int i=0; i<N; i++)
op2::iop(s2, shm2[i]);
res2 = s2;
}
#endif
}
template<typename T, class op, class op1, class op2, bool accumulate>
__global__ void trireduce_L1_kernel(T *res, T* res1, T* res2,
const T* d_i, int size) {
int blockId = blockIdx.x;
trireduce_L1_dev<T, op, op1, op2, accumulate>
(res[blockId], res1[blockId], res2[blockId], d_i, size);
}
template<typename T, class op, class op1, class op2, bool accumulate>
__device__ void reduce_ip3_op3_L2_dev(T& d_o, T& d_o1, T& d_o2,
const T* d_i, const T* d_i1, const T* d_i2) {
__shared__ T shm[N];
__shared__ T shm1[N];
__shared__ T shm2[N];
uint idx=threadIdx.x;
T s =op::identity();
T s1=op1::identity();
T s2=op2::identity();
#if M%(2*N)==0
for(uint j=idx; j<M; j+=N*2){
op::iop(s, op::op(d_i[j], d_i[j + N]));
op1::iop(s1, op1::op(d_i1[j], d_i1[j + N]));
op2::iop(s2, op2::op(d_i2[j], d_i2[j + N]));
}
#else
for(uint j=idx; j<M; j+=N){
op::iop(s, d_i[j]);
op1::iop(s1, d_i1[j]);
op2::iop(s2, d_i2[j]);
}
#endif
shm[threadIdx.x]=s;
shm1[threadIdx.x]=s1;
shm2[threadIdx.x]=s2;
__syncthreads();
if(idx==0) {
T s=op::identity();
T s1=op1::identity();
T s2=op2::identity();
for(uint i=0; i<N; i++){
op::iop(s, shm[i]);
op1::iop(s1, shm1[i]);
op2::iop(s2, shm2[i]);
}
d_o = accumulate ? op::op(d_o, s) : s;
d_o1 = accumulate ? op1::op(d_o1, s1) : s1;
d_o2 = accumulate ? op2::op(d_o2, s2) : s2;
}
}
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip3_op3_L3_kernel(T* d_o, T* d_o1, T* d_o2,
const T* d_i, const T* d_i1, const T* d_i2) {
reduce_ip3_op3_L2_dev<T, op, op1, accumulate>(d_o[0], d_o1[0], d_o2[0],
d_i, d_i1, d_i2);
}
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip3_op3_L3_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2) {
reduce_ip3_op3_L2_dev<T, op, op1, accumulate>(d_o[0], d_o[1], d_o[2],
d_i, d_i1, d_i2);
}
#endif
} // end namespace PyCA | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
/* ================================================================
*
* PyCA Project
*
* Copyright (c) J. Samuel Preston, Linh K. Ha, Sarang C. Joshi. All
* rights reserved. See Copyright.txt or for details.
*
* This software is distributed WITHOUT ANY WARRANTY; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the above copyright notice for more information.
*
* ================================================================ */
#ifndef __REDUCE_STREAM_KERNEL_CU
#define __REDUCE_STREAM_KERNEL_CU
#if __CUDA_ARCH__ != 200
#define NUMBER_OF_CORES (240 * 16)
#else
#define NUMBER_OF_CORES (240)
#endif
#define OPTIMAL_THREAD_SIZE 64
#define MAX_NUMBER_OF_REDUCE_STREAM 8
#define M NUMBER_OF_CORES // Number of core
#define N OPTIMAL_THREAD_SIZE // Best number of thread per block
#define REDUCE_STREAM_SIZE (M * N)
namespace PyCA {
// First (L1) reduction stage: the whole grid of M*N threads strides through
// d_i with step M*N, each thread folding its elements with 'op'; thread 0 of
// every block then serially folds the block's N shared partials and writes
// one partial result per block into res[blockIdx.x].
// assumes blockDim.x == N (64) and gridDim.x*blockDim.x == M*N — TODO confirm
// at the launch site.
template<typename T, class op>
__global__ void reduce_L1_kernel(T *res, const T* d_i, int size) {
    __shared__ T shm[N];
    int idx=blockIdx.x*blockDim.x+threadIdx.x;
    T s=op::identity();
    int j=idx;
    // Main loop: combine elements j and j+M*N each pass, advancing by 2*M*N.
    for(; j + M * N <size; j+=M*N*2)
        op::iop(s, op::op(d_i[j], d_i[j + M*N]));
    // Tail: after the loop, at most index j is still in range for this thread.
    shm[threadIdx.x]= (j < size) ? op::op(s, d_i[j]) : s;
    __syncthreads();
    if(threadIdx.x==0) {
        T s=op::identity();          // intentionally shadows the outer partial
        for(int i=0; i<N; i++)
            op::iop(s, shm[i]);
        res[blockIdx.x]=s;
    }
}
// Composed L1 reduction: like reduce_L1_kernel, but the unary functor 'op1'
// is applied to every element (e.g. absolute value or square — depends on the
// instantiation, not visible here) before folding with 'op'.
// Same stride/tail scheme and blockDim.x == N assumption as reduce_L1_kernel.
template<typename T, class op, class op1>
__global__ void compReduce_L1_kernel(T *res, const T* d_i, int size) {
    __shared__ T shm[N];
    int idx=blockIdx.x*blockDim.x+threadIdx.x;
    T s=op::identity();
    int j=idx;
    for(; j + M * N <size; j+=M*N*2)
        op::iop(s, op::op(op1::op(d_i[j]), op1::op(d_i[j + M*N])));
    shm[threadIdx.x]= (j < size) ? op::op(s, op1::op(d_i[j])) : s;
    __syncthreads();
    if(threadIdx.x==0) {
        T s=op::identity();
        for(int i=0; i<N; i++)
            op::iop(s, shm[i]);
        res[blockIdx.x]=s;
    }
}
// Two-array L1 reduction: 'op1' combines the k-th elements of d_i0 and d_i1
// (e.g. multiplication for a dot product) and 'op' folds the combined values.
// Same stride/tail scheme and blockDim.x == N assumption as reduce_L1_kernel.
template<typename T, class op, class op1>
__global__ void product_L1_kernel(T *res, const T* d_i0, const T* d_i1, int size) {
    __shared__ T shm[N];
    int idx=blockIdx.x*blockDim.x+threadIdx.x;
    T s=op::identity();
    int j=idx;
    for(; j + M * N <size; j+=M*N*2)
        op::iop(s, op::op(op1::op(d_i0[j],d_i1[j]), op1::op(d_i0[j + M*N], d_i1[j + M*N])));
    shm[threadIdx.x]= (j < size) ? op::op(s, op1::op(d_i0[j],d_i1[j])) : s;
    __syncthreads();
    if(threadIdx.x==0) {
        T s=op::identity();
        for(int i=0; i<N; i++)
            op::iop(s, shm[i]);
        res[blockIdx.x]=s;
    }
}
////////////////////////////////////////////////////////////////////////////////
// Second (L2) stage: folds the M per-block partials in d_i into the single
// reference d_o.  Each of the N threads strides through d_i; thread 0 folds
// the N shared partials and writes the result, optionally combined with the
// previous d_o value when 'accumulate' is true.
// Intended for a single-block launch (only __syncthreads, no grid sync) —
// TODO confirm at the launch sites.
template<typename T, class op, bool accumulate>
__device__ void reduce_L2_dev(T& d_o, const T* d_i) {
    __shared__ T shm[N];
    uint idx=threadIdx.x;
    T s=op::identity();
#if M%(2*N)==0
    // M divisible by 2N: unroll by pairing d_i[j] with d_i[j+N].
    for(uint j=idx; j<M; j+=N*2)
        op::iop(s, op::op(d_i[j], d_i[j + N]));
#else
    for(uint j=idx; j<M; j+=N)
        op::iop(s, d_i[j]);
#endif
    shm[threadIdx.x]=s;
    __syncthreads();
    if(idx==0) {
        T s=op::identity();
        for(uint i=0; i<N; i++)
            op::iop(s, shm[i]);
        d_o=(accumulate) ? op::op(d_o, s) : s;
    }
}
// In-place L2 wrapper: overwrites d_i[0] with the fold of all M partials
// (accumulate=false).  d_i[0] is both input and output; within one block this
// is safe because reduce_L2_dev reads everything before __syncthreads and
// writes afterwards — confirm the kernel is launched with one block.
template<typename T, class op>
__global__ void reduce_L2_kernel(T *d_i) {
    reduce_L2_dev<T, op, false>(d_i[0], d_i);
}
// Out-of-place L2 wrapper: folds the M partials in d_i into d_o[0], combining
// with the previous d_o[0] when 'accumulate' is true.
template<typename T, class op, bool accumulate>
__global__ void reduce_L2_kernel(T* d_o, const T* d_i) {
    reduce_L2_dev<T, op, accumulate>(d_o[0], d_i);
}
////////////////////////////////////////////////////////////////////////////////
// Dual L1 reduction over one input: computes two independent folds of d_i
// (with 'op' and 'op1', e.g. max and sum — depends on instantiation) in a
// single pass, writing this block's two partials to res and res1.
// Same stride/tail scheme and blockDim.x == N assumption as reduce_L1_kernel.
template<typename T, class op, class op1>
__device__ void bireduce_L1_dev(T& res, T& res1, const T* d_i, int size) {
    __shared__ T shm[N];
    __shared__ T shm1[N];
    int idx=blockIdx.x*blockDim.x+threadIdx.x;
    T s =op::identity();
    T s1=op1::identity();
    int j=idx;
    for(; j + M * N <size; j+=M*N*2) {
        op::iop(s, op::op(d_i[j], d_i[j + M*N]));
        op1::iop(s1, op1::op(d_i[j], d_i[j + M*N]));
    }
    shm[threadIdx.x] = (j < size) ? op::op(s, d_i[j]) : s;
    shm1[threadIdx.x] = (j < size) ? op1::op(s1, d_i[j]) : s1;
    __syncthreads();
#if 1
    // Thread 0 folds both partial arrays serially.
    if(threadIdx.x==0) {
        T s =op::identity();
        T s1=op1::identity();
        for(int i=0; i<N; i++){
            op::iop(s, shm[i]);
            op1::iop(s1, shm1[i]);
        }
        res = s;
        res1 = s1;
    }
#else
    // Disabled alternative: distribute the two final folds to threads 0 and 1.
    if(threadIdx.x==0) {
        T s =op::identity();
        for(int i=0; i<N; i++)
            op::iop(s, shm[i]);
        res = s;
    }
    if(threadIdx.x==1) {
        T s1 =op1::identity();
        for(int i=0; i<N; i++)
            op1::iop(s1, shm1[i]);
        res1 = s1;
    }
#endif
}
// Kernel wrapper: each block writes its pair of partials to res[blockIdx.x]
// and res1[blockIdx.x].
template<typename T, class op, class op1>
__global__ void bireduce_L1_kernel(T *res, T* res1, const T* d_i, int size) {
    int blockId = blockIdx.x;
    bireduce_L1_dev<T, op, op1>
        (res[blockId], res1[blockId], d_i, size);
}
// Dual L2 stage: folds two independent partial arrays (d_i with 'op', d_i1
// with 'op1') into the references d_o and d_o1, optionally accumulating into
// their previous values.  Intended for a single-block launch — TODO confirm.
template<typename T, class op, class op1, bool accumulate>
__device__ void reduce_ip2_op2_L2_dev(T& d_o, T& d_o1, const T* d_i, const T* d_i1) {
    __shared__ T shm[N];
    __shared__ T shm1[N];
    uint idx=threadIdx.x;
    T s =op::identity();
    T s1=op1::identity();
#if M%(2*N)==0
    // M divisible by 2N: pairwise unrolled stride over both partial arrays.
    for(uint j=idx; j<M; j+=N*2){
        op::iop(s, op::op(d_i[j], d_i[j + N]));
        op1::iop(s1, op1::op(d_i1[j], d_i1[j + N]));
    }
#else
    for(uint j=idx; j<M; j+=N){
        op::iop(s, d_i[j]);
        op1::iop(s1, d_i1[j]);
    }
#endif
    shm[threadIdx.x]=s;
    shm1[threadIdx.x]=s1;
    __syncthreads();
    if(idx==0) {
        T s=op::identity();
        T s1=op1::identity();
        for(uint i=0; i<N; i++){
            op::iop(s, shm[i]);
            op1::iop(s1, shm1[i]);
        }
        d_o = accumulate ? op::op(d_o, s) : s;
        d_o1 = accumulate ? op1::op(d_o1, s1) : s1;
    }
}
// Unpacked-output wrapper: results land in d_o[0] and d_o1[0].
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip2_op2_L2_kernel(T* d_o, T* d_o1, const T* d_i, const T* d_i1) {
    reduce_ip2_op2_L2_dev<T, op, op1, accumulate>(d_o[0], d_o1[0], d_i, d_i1);
}
// Packed-output wrapper: both results land in d_o[0] and d_o[1].
// NOTE(review): if d_o aliases d_i, d_o[0]/d_o[1] are overwritten only after
// the barrier inside reduce_ip2_op2_L2_dev; safe for one block — confirm.
template<typename T, class op, class op1, bool accumulate>
__global__ void reduce_ip2_op2_L2_kernel(T* d_o, T *d_i, const T* d_i1) {
    reduce_ip2_op2_L2_dev<T, op, op1, accumulate>(d_o[0], d_o[1], d_i, d_i1);
}
////////////////////////////////////////////////////////////////////////////////
// Triple L1 reduction over one input: computes three independent folds of
// d_i (with 'op', 'op1', 'op2') in a single pass, writing this block's three
// partials to res, res1 and res2.
// Same stride/tail scheme and blockDim.x == N assumption as reduce_L1_kernel.
template<typename T, class op, class op1, class op2>
__device__ void trireduce_L1_dev(T& res, T& res1, T& res2, const T* d_i, int size) {
    __shared__ T shm[N];
    __shared__ T shm1[N];
    __shared__ T shm2[N];
    int idx=blockIdx.x*blockDim.x+threadIdx.x;
    T s =op::identity();
    T s1=op1::identity();
    T s2=op2::identity();
    int j=idx;
    for(; j + M * N <size; j+=M*N*2) {
        op::iop(s, op::op(d_i[j], d_i[j + M*N]));
        op1::iop(s1, op1::op(d_i[j], d_i[j + M*N]));
        op2::iop(s2, op2::op(d_i[j], d_i[j + M*N]));
    }
    shm[threadIdx.x] = (j < size) ? op::op(s, d_i[j]) : s;
    shm1[threadIdx.x] = (j < size) ? op1::op(s1, d_i[j]) : s1;
    shm2[threadIdx.x] = (j < size) ? op2::op(s2, d_i[j]) : s2;
    __syncthreads();
#if 1
    // Thread 0 folds all three partial arrays serially.
    if(threadIdx.x==0) {
        T s =op::identity();
        T s1=op1::identity();
        T s2=op2::identity();
        for(int i=0; i<N; i++){
            op::iop(s, shm[i]);
            op1::iop(s1, shm1[i]);
            op2::iop(s2, shm2[i]);
        }
        res = s;
        res1 = s1;
        res2 = s2;
    }
#else
    // Disabled alternative: distribute the three folds to threads 0, 1 and 2.
    if(threadIdx.x==0) {
        T s =op::identity();
        for(int i=0; i<N; i++)
            op::iop(s, shm[i]);
        res = s;
    }
    if(threadIdx.x==1) {
        T s1 =op1::identity();
        for(int i=0; i<N; i++)
            op1::iop(s1, shm1[i]);
        res1 = s1;
    }
    if(threadIdx.x==2) {
        T s2 =op2::identity();
        for(int i=0; i<N; i++)
            op2::iop(s2, shm2[i]);
        res2 = s2;
    }
#endif
}
// Kernel wrapper: each block writes its three partials to res[blockIdx.x],
// res1[blockIdx.x] and res2[blockIdx.x].
// FIX: the original call passed <T, op, op1, op2, accumulate> to
// trireduce_L1_dev, which only declares four template parameters, so the
// template could never instantiate.  The extra argument is dropped;
// 'accumulate' stays in this kernel's signature for source compatibility but
// is currently unused (the L1 stage always overwrites res/res1/res2).
template<typename T, class op, class op1, class op2, bool accumulate>
__global__ void trireduce_L1_kernel(T *res, T* res1, T* res2,
                                    const T* d_i, int size) {
    int blockId = blockIdx.x;
    trireduce_L1_dev<T, op, op1, op2>
        (res[blockId], res1[blockId], res2[blockId], d_i, size);
}
// Triple L2 stage: folds three independent partial arrays (d_i/'op',
// d_i1/'op1', d_i2/'op2') into the references d_o/d_o1/d_o2, optionally
// accumulating into their previous values.  Single-block launch intended —
// TODO confirm at the launch sites.
template<typename T, class op, class op1, class op2, bool accumulate>
__device__ void reduce_ip3_op3_L2_dev(T& d_o, T& d_o1, T& d_o2,
                                      const T* d_i, const T* d_i1, const T* d_i2) {
    __shared__ T shm[N];
    __shared__ T shm1[N];
    __shared__ T shm2[N];
    uint idx=threadIdx.x;
    T s =op::identity();
    T s1=op1::identity();
    T s2=op2::identity();
#if M%(2*N)==0
    // M divisible by 2N: pairwise unrolled stride over all three arrays.
    for(uint j=idx; j<M; j+=N*2){
        op::iop(s, op::op(d_i[j], d_i[j + N]));
        op1::iop(s1, op1::op(d_i1[j], d_i1[j + N]));
        op2::iop(s2, op2::op(d_i2[j], d_i2[j + N]));
    }
#else
    for(uint j=idx; j<M; j+=N){
        op::iop(s, d_i[j]);
        op1::iop(s1, d_i1[j]);
        op2::iop(s2, d_i2[j]);
    }
#endif
    shm[threadIdx.x]=s;
    shm1[threadIdx.x]=s1;
    shm2[threadIdx.x]=s2;
    __syncthreads();
    if(idx==0) {
        T s=op::identity();
        T s1=op1::identity();
        T s2=op2::identity();
        for(uint i=0; i<N; i++){
            op::iop(s, shm[i]);
            op1::iop(s1, shm1[i]);
            op2::iop(s2, shm2[i]);
        }
        d_o = accumulate ? op::op(d_o, s) : s;
        d_o1 = accumulate ? op1::op(d_o1, s1) : s1;
        d_o2 = accumulate ? op2::op(d_o2, s2) : s2;
    }
}
// Final (L3) stage for the triple reduction: folds the three partial arrays
// d_i/d_i1/d_i2 into the single outputs d_o[0]/d_o1[0]/d_o2[0], optionally
// accumulating into the previous output values.
// NOTE(review): the original template list was <T, op, op1, accumulate>, which
// forwarded the bool 'accumulate' into the 'class op2' slot of
// reduce_ip3_op3_L2_dev and therefore could never instantiate; 'op2' is added
// here.  No working caller can exist for the broken form.
template<typename T, class op, class op1, class op2, bool accumulate>
__global__ void reduce_ip3_op3_L3_kernel(T* d_o, T* d_o1, T* d_o2,
                                         const T* d_i, const T* d_i1, const T* d_i2) {
    reduce_ip3_op3_L2_dev<T, op, op1, op2, accumulate>(d_o[0], d_o1[0], d_o2[0],
                                                       d_i, d_i1, d_i2);
}
// Packed-output variant of the L3 stage: the three results are written to
// d_o[0], d_o[1] and d_o[2] of a single output array.
// NOTE(review): as with the unpacked overload, the original call passed the
// bool 'accumulate' where reduce_ip3_op3_L2_dev expects 'class op2'; the
// missing 'op2' template parameter is added so the template can instantiate.
template<typename T, class op, class op1, class op2, bool accumulate>
__global__ void reduce_ip3_op3_L3_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2) {
    reduce_ip3_op3_L2_dev<T, op, op1, op2, accumulate>(d_o[0], d_o[1], d_o[2],
                                                       d_i, d_i1, d_i2);
}
#endif
} // end namespace PyCA | .text
.file "ReduceStreamKernel.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00011158_00000000-6_ReduceStreamKernel.cudafe1.cpp"
.text
#APP
#NO_APP
# atexit hook installed by __sti____cudaRegisterAll: unregisters the fat
# binary whose handle was saved in _ZL20__cudaFatCubinHandle.
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2056:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                # realign rsp to 16 for the call below
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi   # arg0 = saved handle
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2056:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2079:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2079:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "ReduceStreamKernel.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <iostream>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
void initialData_1(float *in, const int size)
{
    // Fill in[0..size) with pseudo-random floats in [0.0, 25.5].
    int i = 0;
    while (i < size)
    {
        const int low_byte = rand() & 0xFF;           // keep 8 bits -> [0, 255]
        in[i] = static_cast<float>(low_byte) / 10.0f; //100.0f;
        ++i;
    }
    return;
}
// case 0 copy kernel: access data in rows
// Row-major element-wise copy: one thread per (col,row) cell of the nx-by-ny
// matrix; out-of-range threads on ragged edges return immediately.
__global__ void copyRow_1(float *out, float *in, const int nx, const int ny)
{
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= (unsigned int)nx || row >= (unsigned int)ny)
        return;
    const unsigned int idx = row * nx + col;
    out[idx] = in[idx];
}
// main functions
//int main(int argc, char **argv)
//{
// // set up device
// int dev = 0;
// cudaDeviceProp deviceProp;
// cudaError error;
//
// error = cudaGetDeviceProperties(&deviceProp, dev);
// printf("%s starting transpose at ", argv[0]);
// printf("device %d: %s ", dev, deviceProp.name);
// printf("allowed memory size : %d",(int)deviceProp.totalGlobalMem);
// error = cudaSetDevice(dev);
//
// // set up array size 2048
// int nx = 1 << 5;
// int ny = 1 << 5;
//
// // select a kernel and block size
// int iKernel = 1;
// int blockx = 16;
// int blocky = 16;
//
// if (argc > 1) blockx = atoi(argv[1]);
//
// if (argc > 2) blocky = atoi(argv[2]);
//
// if (argc > 3) nx = atoi(argv[3]);
//
// if (argc > 4) ny = atoi(argv[4]);
//
//
// size_t nBytes = nx * ny * sizeof(float);
// float bytes = nBytes / (1024 * 1024);
// printf(" with matrix nx %d ny %d with kernel %d with %.2f MB memory\n", nx, ny, iKernel, bytes);
//
// // execution configuration
// dim3 block(blockx, blocky);
// dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
//
// // allocate host memory
// float *h_A = (float *)malloc(nBytes);
// float *hostRef = (float *)malloc(nBytes);
// float *gpuRef = (float *)malloc(nBytes);
//
// // initialize host array
// initialData_1(h_A, nx * ny);
//
// // allocate device memory
// float *d_A, *d_C;
// error = cudaMalloc((float**)&d_A, nBytes);
// error = cudaMalloc((float**)&d_C, nBytes);
//
// // copy data from host to device
// error = cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
// error = cudaGetLastError();
// printf("%s - %s \n", cudaGetErrorName(error),cudaGetErrorString(error));
//
// copyRow_1 << <grid, block >> >(d_C, d_A, nx, ny);
// error = cudaDeviceSynchronize();
//
// printf("%s <<< grid (%d,%d) block (%d,%d)>>> ", "CopyRow", grid.x, grid.y, block.x,
// block.y);
// cudaGetLastError();
//
// // check kernel results
// if (iKernel > 1)
// {
// error = cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
// }
//
// // free host and device memory
// error = cudaFree(d_A);
// error = cudaFree(d_C);
// free(h_A);
// free(hostRef);
// free(gpuRef);
//
// // reset device
// error = cudaDeviceReset();
// system("pause");
// return EXIT_SUCCESS;
//} | code for sm_80
Function : _Z9copyRow_1PfS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.U32.AND P0, PT, R3, c[0x0][0x174], PT ; /* 0x00005d0003007a0c */
/* 0x000fe20003f06070 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.U32.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */
/* 0x000fda0000706470 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*00b0*/ IMAD R0, R3, c[0x0][0x170], R0 ; /* 0x00005c0003007a24 */
/* 0x000fe200078e0200 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE.U32 R2, R0, R5, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0005 */
/*00e0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00f0*/ IMAD.WIDE.U32 R4, R0, R5, c[0x0][0x160] ; /* 0x0000580000047625 */
/* 0x000fca00078e0005 */
/*0100*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101904 */
/*0110*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0120*/ BRA 0x120; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <iostream>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
void initialData_1(float *in, const int size)
{
    // Fill in[0..size) with pseudo-random floats in [0.0, 25.5].
    int i = 0;
    while (i < size)
    {
        const int low_byte = rand() & 0xFF;           // keep 8 bits -> [0, 255]
        in[i] = static_cast<float>(low_byte) / 10.0f; //100.0f;
        ++i;
    }
    return;
}
// case 0 copy kernel: access data in rows
// Row-major element-wise copy: one thread per (col,row) cell of the nx-by-ny
// matrix; out-of-range threads on ragged edges return immediately.
__global__ void copyRow_1(float *out, float *in, const int nx, const int ny)
{
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= (unsigned int)nx || row >= (unsigned int)ny)
        return;
    const unsigned int idx = row * nx + col;
    out[idx] = in[idx];
}
// main functions
//int main(int argc, char **argv)
//{
// // set up device
// int dev = 0;
// cudaDeviceProp deviceProp;
// cudaError error;
//
// error = cudaGetDeviceProperties(&deviceProp, dev);
// printf("%s starting transpose at ", argv[0]);
// printf("device %d: %s ", dev, deviceProp.name);
// printf("allowed memory size : %d",(int)deviceProp.totalGlobalMem);
// error = cudaSetDevice(dev);
//
// // set up array size 2048
// int nx = 1 << 5;
// int ny = 1 << 5;
//
// // select a kernel and block size
// int iKernel = 1;
// int blockx = 16;
// int blocky = 16;
//
// if (argc > 1) blockx = atoi(argv[1]);
//
// if (argc > 2) blocky = atoi(argv[2]);
//
// if (argc > 3) nx = atoi(argv[3]);
//
// if (argc > 4) ny = atoi(argv[4]);
//
//
// size_t nBytes = nx * ny * sizeof(float);
// float bytes = nBytes / (1024 * 1024);
// printf(" with matrix nx %d ny %d with kernel %d with %.2f MB memory\n", nx, ny, iKernel, bytes);
//
// // execution configuration
// dim3 block(blockx, blocky);
// dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
//
// // allocate host memory
// float *h_A = (float *)malloc(nBytes);
// float *hostRef = (float *)malloc(nBytes);
// float *gpuRef = (float *)malloc(nBytes);
//
// // initialize host array
// initialData_1(h_A, nx * ny);
//
// // allocate device memory
// float *d_A, *d_C;
// error = cudaMalloc((float**)&d_A, nBytes);
// error = cudaMalloc((float**)&d_C, nBytes);
//
// // copy data from host to device
// error = cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
// error = cudaGetLastError();
// printf("%s - %s \n", cudaGetErrorName(error),cudaGetErrorString(error));
//
// copyRow_1 << <grid, block >> >(d_C, d_A, nx, ny);
// error = cudaDeviceSynchronize();
//
// printf("%s <<< grid (%d,%d) block (%d,%d)>>> ", "CopyRow", grid.x, grid.y, block.x,
// block.y);
// cudaGetLastError();
//
// // check kernel results
// if (iKernel > 1)
// {
// error = cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
// }
//
// // free host and device memory
// error = cudaFree(d_A);
// error = cudaFree(d_C);
// free(h_A);
// free(hostRef);
// free(gpuRef);
//
// // reset device
// error = cudaDeviceReset();
// system("pause");
// return EXIT_SUCCESS;
//} | .file "tmpxft_0013a8a2_00000000-6_block_and_grid_size_check.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
#-----------------------------------------------------------------------
# void initialData_1(float *in, int size)   (GCC SysV AMD64 output)
# In:    rdi = in, esi = size
# Fills in[0..size) with (rand() & 0xFF) / 10.0f; no-op when size <= 0.
# Callee-saved rbx/rbp hold the cursor and end pointer across rand calls.
#-----------------------------------------------------------------------
	.globl	_Z13initialData_1Pfi
	.type	_Z13initialData_1Pfi, @function
_Z13initialData_1Pfi:
.LFB3669:
	.cfi_startproc
	endbr64
	testl	%esi, %esi              # size <= 0 -> nothing to fill
	jle	.L8
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset 6, -16
	pushq	%rbx
	.cfi_def_cfa_offset 24
	.cfi_offset 3, -24
	subq	$8, %rsp                # keep rsp 16-aligned at each call rand
	.cfi_def_cfa_offset 32
	movq	%rdi, %rbx              # rbx = write cursor into the array
	movslq	%esi, %rsi
	leaq	(%rdi,%rsi,4), %rbp     # rbp = one-past-the-end pointer
.L5:
	call	rand@PLT
	movzbl	%al, %eax               # rand() & 0xFF
	pxor	%xmm0, %xmm0            # break false dependency before cvtsi2ss
	cvtsi2ssl	%eax, %xmm0
	divss	.LC0(%rip), %xmm0       # .LC0 = 10.0f
	movss	%xmm0, (%rbx)
	addq	$4, %rbx
	cmpq	%rbp, %rbx
	jne	.L5
	addq	$8, %rsp
	.cfi_def_cfa_offset 24
	popq	%rbx
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	ret
.L8:
	.cfi_restore 3
	.cfi_restore 6
	ret
	.cfi_endproc
.LFE3669:
	.size	_Z13initialData_1Pfi, .-_Z13initialData_1Pfi
.globl _Z32__device_stub__Z9copyRow_1PfS_iiPfS_ii
.type _Z32__device_stub__Z9copyRow_1PfS_iiPfS_ii, @function
_Z32__device_stub__Z9copyRow_1PfS_iiPfS_ii:
.LFB3694:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9copyRow_1PfS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z32__device_stub__Z9copyRow_1PfS_iiPfS_ii, .-_Z32__device_stub__Z9copyRow_1PfS_iiPfS_ii
.globl _Z9copyRow_1PfS_ii
.type _Z9copyRow_1PfS_ii, @function
_Z9copyRow_1PfS_ii:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9copyRow_1PfS_iiPfS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z9copyRow_1PfS_ii, .-_Z9copyRow_1PfS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z9copyRow_1PfS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z9copyRow_1PfS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1092616192
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <iostream>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
void initialData_1(float *in, const int size)
{
    // Fill in[0..size) with pseudo-random floats in [0.0, 25.5].
    int i = 0;
    while (i < size)
    {
        const int low_byte = rand() & 0xFF;           // keep 8 bits -> [0, 255]
        in[i] = static_cast<float>(low_byte) / 10.0f; //100.0f;
        ++i;
    }
    return;
}
// case 0 copy kernel: access data in rows
// Row-major element-wise copy: one thread per (col,row) cell of the nx-by-ny
// matrix; out-of-range threads on ragged edges return immediately.
__global__ void copyRow_1(float *out, float *in, const int nx, const int ny)
{
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= (unsigned int)nx || row >= (unsigned int)ny)
        return;
    const unsigned int idx = row * nx + col;
    out[idx] = in[idx];
}
// main functions
//int main(int argc, char **argv)
//{
// // set up device
// int dev = 0;
// cudaDeviceProp deviceProp;
// cudaError error;
//
// error = cudaGetDeviceProperties(&deviceProp, dev);
// printf("%s starting transpose at ", argv[0]);
// printf("device %d: %s ", dev, deviceProp.name);
// printf("allowed memory size : %d",(int)deviceProp.totalGlobalMem);
// error = cudaSetDevice(dev);
//
// // set up array size 2048
// int nx = 1 << 5;
// int ny = 1 << 5;
//
// // select a kernel and block size
// int iKernel = 1;
// int blockx = 16;
// int blocky = 16;
//
// if (argc > 1) blockx = atoi(argv[1]);
//
// if (argc > 2) blocky = atoi(argv[2]);
//
// if (argc > 3) nx = atoi(argv[3]);
//
// if (argc > 4) ny = atoi(argv[4]);
//
//
// size_t nBytes = nx * ny * sizeof(float);
// float bytes = nBytes / (1024 * 1024);
// printf(" with matrix nx %d ny %d with kernel %d with %.2f MB memory\n", nx, ny, iKernel, bytes);
//
// // execution configuration
// dim3 block(blockx, blocky);
// dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
//
// // allocate host memory
// float *h_A = (float *)malloc(nBytes);
// float *hostRef = (float *)malloc(nBytes);
// float *gpuRef = (float *)malloc(nBytes);
//
// // initialize host array
// initialData_1(h_A, nx * ny);
//
// // allocate device memory
// float *d_A, *d_C;
// error = cudaMalloc((float**)&d_A, nBytes);
// error = cudaMalloc((float**)&d_C, nBytes);
//
// // copy data from host to device
// error = cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
// error = cudaGetLastError();
// printf("%s - %s \n", cudaGetErrorName(error),cudaGetErrorString(error));
//
// copyRow_1 << <grid, block >> >(d_C, d_A, nx, ny);
// error = cudaDeviceSynchronize();
//
// printf("%s <<< grid (%d,%d) block (%d,%d)>>> ", "CopyRow", grid.x, grid.y, block.x,
// block.y);
// cudaGetLastError();
//
// // check kernel results
// if (iKernel > 1)
// {
// error = cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
// }
//
// // free host and device memory
// error = cudaFree(d_A);
// error = cudaFree(d_C);
// free(h_A);
// free(hostRef);
// free(gpuRef);
//
// // reset device
// error = cudaDeviceReset();
// system("pause");
// return EXIT_SUCCESS;
//} | #include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(1); \
} \
}
void initialData_1(float *in, const int size)
{
    // Fill in[0..size) with pseudo-random floats in [0.0, 25.5].
    int i = 0;
    while (i < size)
    {
        const int low_byte = rand() & 0xFF;           // keep 8 bits -> [0, 255]
        in[i] = static_cast<float>(low_byte) / 10.0f; //100.0f;
        ++i;
    }
    return;
}
// case 0 copy kernel: access data in rows
// Row-major element-wise copy: one thread per (col,row) cell of the nx-by-ny
// matrix; out-of-range threads on ragged edges return immediately.
__global__ void copyRow_1(float *out, float *in, const int nx, const int ny)
{
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= (unsigned int)nx || row >= (unsigned int)ny)
        return;
    const unsigned int idx = row * nx + col;
    out[idx] = in[idx];
}
// main functions
//int main(int argc, char **argv)
//{
// // set up device
// int dev = 0;
// cudaDeviceProp deviceProp;
// cudaError error;
//
// error = cudaGetDeviceProperties(&deviceProp, dev);
// printf("%s starting transpose at ", argv[0]);
// printf("device %d: %s ", dev, deviceProp.name);
// printf("allowed memory size : %d",(int)deviceProp.totalGlobalMem);
// error = cudaSetDevice(dev);
//
// // set up array size 2048
// int nx = 1 << 5;
// int ny = 1 << 5;
//
// // select a kernel and block size
// int iKernel = 1;
// int blockx = 16;
// int blocky = 16;
//
// if (argc > 1) blockx = atoi(argv[1]);
//
// if (argc > 2) blocky = atoi(argv[2]);
//
// if (argc > 3) nx = atoi(argv[3]);
//
// if (argc > 4) ny = atoi(argv[4]);
//
//
// size_t nBytes = nx * ny * sizeof(float);
// float bytes = nBytes / (1024 * 1024);
// printf(" with matrix nx %d ny %d with kernel %d with %.2f MB memory\n", nx, ny, iKernel, bytes);
//
// // execution configuration
// dim3 block(blockx, blocky);
// dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
//
// // allocate host memory
// float *h_A = (float *)malloc(nBytes);
// float *hostRef = (float *)malloc(nBytes);
// float *gpuRef = (float *)malloc(nBytes);
//
// // initialize host array
// initialData_1(h_A, nx * ny);
//
// // allocate device memory
// float *d_A, *d_C;
// error = cudaMalloc((float**)&d_A, nBytes);
// error = cudaMalloc((float**)&d_C, nBytes);
//
// // copy data from host to device
// error = cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
// error = cudaGetLastError();
// printf("%s - %s \n", cudaGetErrorName(error),cudaGetErrorString(error));
//
// copyRow_1 << <grid, block >> >(d_C, d_A, nx, ny);
// error = cudaDeviceSynchronize();
//
// printf("%s <<< grid (%d,%d) block (%d,%d)>>> ", "CopyRow", grid.x, grid.y, block.x,
// block.y);
// cudaGetLastError();
//
// // check kernel results
// if (iKernel > 1)
// {
// error = cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
// }
//
// // free host and device memory
// error = cudaFree(d_A);
// error = cudaFree(d_C);
// free(h_A);
// free(hostRef);
// free(gpuRef);
//
// // reset device
// error = cudaDeviceReset();
// system("pause");
// return EXIT_SUCCESS;
//} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(1); \
} \
}
void initialData_1(float *in, const int size)
{
for (int i = 0; i < size; i++)
{
in[i] = (float)(rand() & 0xFF) / 10.0f; //100.0f;
}
return;
}
// case 0 copy kernel: access data in rows
__global__ void copyRow_1(float *out, float *in, const int nx, const int ny)
{
unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix < nx && iy < ny)
{
out[iy * nx + ix] = in[iy * nx + ix];
}
}
// main functions
//int main(int argc, char **argv)
//{
// // set up device
// int dev = 0;
// cudaDeviceProp deviceProp;
// cudaError error;
//
// error = cudaGetDeviceProperties(&deviceProp, dev);
// printf("%s starting transpose at ", argv[0]);
// printf("device %d: %s ", dev, deviceProp.name);
// printf("allowed memory size : %d",(int)deviceProp.totalGlobalMem);
// error = cudaSetDevice(dev);
//
// // set up array size 2048
// int nx = 1 << 5;
// int ny = 1 << 5;
//
// // select a kernel and block size
// int iKernel = 1;
// int blockx = 16;
// int blocky = 16;
//
// if (argc > 1) blockx = atoi(argv[1]);
//
// if (argc > 2) blocky = atoi(argv[2]);
//
// if (argc > 3) nx = atoi(argv[3]);
//
// if (argc > 4) ny = atoi(argv[4]);
//
//
// size_t nBytes = nx * ny * sizeof(float);
// float bytes = nBytes / (1024 * 1024);
// printf(" with matrix nx %d ny %d with kernel %d with %.2f MB memory\n", nx, ny, iKernel, bytes);
//
// // execution configuration
// dim3 block(blockx, blocky);
// dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
//
// // allocate host memory
// float *h_A = (float *)malloc(nBytes);
// float *hostRef = (float *)malloc(nBytes);
// float *gpuRef = (float *)malloc(nBytes);
//
// // initialize host array
// initialData_1(h_A, nx * ny);
//
// // allocate device memory
// float *d_A, *d_C;
// error = cudaMalloc((float**)&d_A, nBytes);
// error = cudaMalloc((float**)&d_C, nBytes);
//
// // copy data from host to device
// error = cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
// error = cudaGetLastError();
// printf("%s - %s \n", cudaGetErrorName(error),cudaGetErrorString(error));
//
// copyRow_1 << <grid, block >> >(d_C, d_A, nx, ny);
// error = cudaDeviceSynchronize();
//
// printf("%s <<< grid (%d,%d) block (%d,%d)>>> ", "CopyRow", grid.x, grid.y, block.x,
// block.y);
// cudaGetLastError();
//
// // check kernel results
// if (iKernel > 1)
// {
// error = cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
// }
//
// // free host and device memory
// error = cudaFree(d_A);
// error = cudaFree(d_C);
// free(h_A);
// free(hostRef);
// free(gpuRef);
//
// // reset device
// error = cudaDeviceReset();
// system("pause");
// return EXIT_SUCCESS;
//} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9copyRow_1PfS_ii
.globl _Z9copyRow_1PfS_ii
.p2align 8
.type _Z9copyRow_1PfS_ii,@function
_Z9copyRow_1PfS_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x10
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
v_cmp_gt_u32_e32 vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_u32_e64 s2, s5, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_mad_u64_u32 v[2:3], null, v1, s4, v[0:1]
v_mov_b32_e32 v3, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9copyRow_1PfS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9copyRow_1PfS_ii, .Lfunc_end0-_Z9copyRow_1PfS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9copyRow_1PfS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9copyRow_1PfS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(1); \
} \
}
void initialData_1(float *in, const int size)
{
for (int i = 0; i < size; i++)
{
in[i] = (float)(rand() & 0xFF) / 10.0f; //100.0f;
}
return;
}
// case 0 copy kernel: access data in rows
__global__ void copyRow_1(float *out, float *in, const int nx, const int ny)
{
unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;
if (ix < nx && iy < ny)
{
out[iy * nx + ix] = in[iy * nx + ix];
}
}
// main functions
//int main(int argc, char **argv)
//{
// // set up device
// int dev = 0;
// cudaDeviceProp deviceProp;
// cudaError error;
//
// error = cudaGetDeviceProperties(&deviceProp, dev);
// printf("%s starting transpose at ", argv[0]);
// printf("device %d: %s ", dev, deviceProp.name);
// printf("allowed memory size : %d",(int)deviceProp.totalGlobalMem);
// error = cudaSetDevice(dev);
//
// // set up array size 2048
// int nx = 1 << 5;
// int ny = 1 << 5;
//
// // select a kernel and block size
// int iKernel = 1;
// int blockx = 16;
// int blocky = 16;
//
// if (argc > 1) blockx = atoi(argv[1]);
//
// if (argc > 2) blocky = atoi(argv[2]);
//
// if (argc > 3) nx = atoi(argv[3]);
//
// if (argc > 4) ny = atoi(argv[4]);
//
//
// size_t nBytes = nx * ny * sizeof(float);
// float bytes = nBytes / (1024 * 1024);
// printf(" with matrix nx %d ny %d with kernel %d with %.2f MB memory\n", nx, ny, iKernel, bytes);
//
// // execution configuration
// dim3 block(blockx, blocky);
// dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
//
// // allocate host memory
// float *h_A = (float *)malloc(nBytes);
// float *hostRef = (float *)malloc(nBytes);
// float *gpuRef = (float *)malloc(nBytes);
//
// // initialize host array
// initialData_1(h_A, nx * ny);
//
// // allocate device memory
// float *d_A, *d_C;
// error = cudaMalloc((float**)&d_A, nBytes);
// error = cudaMalloc((float**)&d_C, nBytes);
//
// // copy data from host to device
// error = cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
// error = cudaGetLastError();
// printf("%s - %s \n", cudaGetErrorName(error),cudaGetErrorString(error));
//
// copyRow_1 << <grid, block >> >(d_C, d_A, nx, ny);
// error = cudaDeviceSynchronize();
//
// printf("%s <<< grid (%d,%d) block (%d,%d)>>> ", "CopyRow", grid.x, grid.y, block.x,
// block.y);
// cudaGetLastError();
//
// // check kernel results
// if (iKernel > 1)
// {
// error = cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
// }
//
// // free host and device memory
// error = cudaFree(d_A);
// error = cudaFree(d_C);
// free(h_A);
// free(hostRef);
// free(gpuRef);
//
// // reset device
// error = cudaDeviceReset();
// system("pause");
// return EXIT_SUCCESS;
//} | .text
.file "block_and_grid_size_check.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z13initialData_1Pfi
.LCPI0_0:
.long 0x41200000 # float 10
.text
.globl _Z13initialData_1Pfi
.p2align 4, 0x90
.type _Z13initialData_1Pfi,@function
_Z13initialData_1Pfi: # @_Z13initialData_1Pfi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB0_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI0_0(%rip), %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB0_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB0_4: # %._crit_edge
retq
.Lfunc_end0:
.size _Z13initialData_1Pfi, .Lfunc_end0-_Z13initialData_1Pfi
.cfi_endproc
# -- End function
.globl _Z24__device_stub__copyRow_1PfS_ii # -- Begin function _Z24__device_stub__copyRow_1PfS_ii
.p2align 4, 0x90
.type _Z24__device_stub__copyRow_1PfS_ii,@function
_Z24__device_stub__copyRow_1PfS_ii: # @_Z24__device_stub__copyRow_1PfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9copyRow_1PfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z24__device_stub__copyRow_1PfS_ii, .Lfunc_end1-_Z24__device_stub__copyRow_1PfS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9copyRow_1PfS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9copyRow_1PfS_ii,@object # @_Z9copyRow_1PfS_ii
.section .rodata,"a",@progbits
.globl _Z9copyRow_1PfS_ii
.p2align 3, 0x0
_Z9copyRow_1PfS_ii:
.quad _Z24__device_stub__copyRow_1PfS_ii
.size _Z9copyRow_1PfS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9copyRow_1PfS_ii"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__copyRow_1PfS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9copyRow_1PfS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9copyRow_1PfS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.U32.AND P0, PT, R3, c[0x0][0x174], PT ; /* 0x00005d0003007a0c */
/* 0x000fe20003f06070 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.U32.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */
/* 0x000fda0000706470 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*00b0*/ IMAD R0, R3, c[0x0][0x170], R0 ; /* 0x00005c0003007a24 */
/* 0x000fe200078e0200 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE.U32 R2, R0, R5, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fcc00078e0005 */
/*00e0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00f0*/ IMAD.WIDE.U32 R4, R0, R5, c[0x0][0x160] ; /* 0x0000580000047625 */
/* 0x000fca00078e0005 */
/*0100*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101904 */
/*0110*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0120*/ BRA 0x120; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9copyRow_1PfS_ii
.globl _Z9copyRow_1PfS_ii
.p2align 8
.type _Z9copyRow_1PfS_ii,@function
_Z9copyRow_1PfS_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x10
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
v_cmp_gt_u32_e32 vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_u32_e64 s2, s5, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_mad_u64_u32 v[2:3], null, v1, s4, v[0:1]
v_mov_b32_e32 v3, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9copyRow_1PfS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9copyRow_1PfS_ii, .Lfunc_end0-_Z9copyRow_1PfS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9copyRow_1PfS_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9copyRow_1PfS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0013a8a2_00000000-6_block_and_grid_size_check.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z13initialData_1Pfi
.type _Z13initialData_1Pfi, @function
_Z13initialData_1Pfi:
.LFB3669:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L5:
call rand@PLT
movzbl %al, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
divss .LC0(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE3669:
.size _Z13initialData_1Pfi, .-_Z13initialData_1Pfi
.globl _Z32__device_stub__Z9copyRow_1PfS_iiPfS_ii
.type _Z32__device_stub__Z9copyRow_1PfS_iiPfS_ii, @function
_Z32__device_stub__Z9copyRow_1PfS_iiPfS_ii:
.LFB3694:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9copyRow_1PfS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z32__device_stub__Z9copyRow_1PfS_iiPfS_ii, .-_Z32__device_stub__Z9copyRow_1PfS_iiPfS_ii
.globl _Z9copyRow_1PfS_ii
.type _Z9copyRow_1PfS_ii, @function
_Z9copyRow_1PfS_ii:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9copyRow_1PfS_iiPfS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z9copyRow_1PfS_ii, .-_Z9copyRow_1PfS_ii
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z9copyRow_1PfS_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z9copyRow_1PfS_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1092616192
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "block_and_grid_size_check.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z13initialData_1Pfi
.LCPI0_0:
.long 0x41200000 # float 10
.text
.globl _Z13initialData_1Pfi
.p2align 4, 0x90
.type _Z13initialData_1Pfi,@function
_Z13initialData_1Pfi: # @_Z13initialData_1Pfi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB0_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI0_0(%rip), %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB0_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB0_4: # %._crit_edge
retq
.Lfunc_end0:
.size _Z13initialData_1Pfi, .Lfunc_end0-_Z13initialData_1Pfi
.cfi_endproc
# -- End function
.globl _Z24__device_stub__copyRow_1PfS_ii # -- Begin function _Z24__device_stub__copyRow_1PfS_ii
.p2align 4, 0x90
.type _Z24__device_stub__copyRow_1PfS_ii,@function
_Z24__device_stub__copyRow_1PfS_ii: # @_Z24__device_stub__copyRow_1PfS_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9copyRow_1PfS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z24__device_stub__copyRow_1PfS_ii, .Lfunc_end1-_Z24__device_stub__copyRow_1PfS_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9copyRow_1PfS_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9copyRow_1PfS_ii,@object # @_Z9copyRow_1PfS_ii
.section .rodata,"a",@progbits
.globl _Z9copyRow_1PfS_ii
.p2align 3, 0x0
_Z9copyRow_1PfS_ii:
.quad _Z24__device_stub__copyRow_1PfS_ii
.size _Z9copyRow_1PfS_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9copyRow_1PfS_ii"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__copyRow_1PfS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9copyRow_1PfS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | // The boilerplatte code is taken from tuwel
#include <cuda_runtime.h>
#include <sys/time.h>
#include <cuda_profiler_api.h>
#include "main.cuh"
#include "helper.cuh"
#include "GPUStream.cuh"
#include "StreamFunctions.cuh"
#include "StreamFunctionsThrust.cuh"
void experimental_time(){
}
void test_count(){
printf("count() test\n");
int *input_1 = (int*)malloc(5*sizeof(int));
int *input_2 = (int*)malloc(5*sizeof(int));
input_1[0] = 0;
input_1[1] = 1;
input_1[2] = 2;
input_1[3] = 3;
input_1[4] = 4;
input_2[0] = 1;
input_2[1] = 2;
input_2[2] = 3;
input_2[3] = 4;
input_2[4] = 5;
std::shared_ptr<GPUUnitStream> inp_1(new GPUUnitStream(input_1, 5));
std::shared_ptr<GPUUnitStream> inp_2(new GPUUnitStream(input_2, 5));
printf("made inputs\n");
inp_1->copy_to_device();
inp_2->copy_to_device();
std::shared_ptr<GPUIntStream> res_1 = count(inp_1);
std::shared_ptr<GPUIntStream> res_2 = count(inp_2);
res_1->host_offset = (int*)malloc(sizeof(int));
res_1->host_timestamp = (int*)malloc(6*sizeof(int));
res_1->host_values = (int*)malloc(6*sizeof(int));
res_1->size = 6;
res_2->host_offset = (int*)malloc(sizeof(int));
res_2->host_timestamp = (int*)malloc(6*sizeof(int));
res_2->host_values = (int*)malloc(6*sizeof(int));
res_2->size = 6;
res_1->copy_to_host();
res_2->copy_to_host();
printf("RESULT 1:\n");
res_1->print();
printf("RESULT 2:\n");
res_2->print();
}
void test_slift(){
printf("slift test\n");
/*
0: y = 1
1: z = 2
1: y = 2
2: y = 2
3: z = 2
3: y = 2
50: y = 2
51: z = 10
*/
int sx = 5;
int sy = 3;
int *x_v = (int*)malloc(sx*sizeof(int));
int *y_v = (int*)malloc(sy*sizeof(int));
int *x_ts = (int*)malloc(sx*sizeof(int));
int *y_ts = (int*)malloc(sy*sizeof(int));
x_ts[0] = 0;
x_ts[1] = 1;
x_ts[2] = 2;
x_ts[3] = 3;
x_ts[4] = 50;
x_v[0] = 1;
x_v[1] = 2;
x_v[2] = 2;
x_v[3] = 2;
x_v[4] = 2;
y_ts[0] = 1;
y_ts[1] = 3;
y_ts[2] = 51;
y_v[0] = 2;
y_v[1] = 2;
y_v[2] = 10;
//int *res_ts = (int*)malloc((sx+sy)*sizeof(int));
//int *res_v = (int*)malloc((sx+sy)*sizeof(int));
/*for (int i=0; i<sx; i++){
x_ts[i] = i;
x_v[i] = i;
}
for (int i=0; i<sy; i++){
y_ts[i] = i;
y_v[i] = i;
}*/
std::shared_ptr<GPUIntStream> x(new GPUIntStream(x_ts, x_v, sx));
std::shared_ptr<GPUIntStream> y(new GPUIntStream(y_ts, y_v, sy));
x->copy_to_device();
y->copy_to_device();
std::shared_ptr<GPUIntStream> res = slift(x,y, MRG);
res->host_offset = (int*)malloc(sizeof(int));
res->host_timestamp = (int*)malloc(res->size*sizeof(int));
res->host_values = (int*)malloc(res->size*sizeof(int));
res->copy_to_host();
//x->print();
//y->print();
res->print();
}
int main(int argc, char **argv) {
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
test_slift();
//test_count();
return(0);
} | code for sm_80
Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // The boilerplatte code is taken from tuwel
#include <cuda_runtime.h>
#include <sys/time.h>
#include <cuda_profiler_api.h>
#include "main.cuh"
#include "helper.cuh"
#include "GPUStream.cuh"
#include "StreamFunctions.cuh"
#include "StreamFunctionsThrust.cuh"
void experimental_time(){
}
void test_count(){
printf("count() test\n");
int *input_1 = (int*)malloc(5*sizeof(int));
int *input_2 = (int*)malloc(5*sizeof(int));
input_1[0] = 0;
input_1[1] = 1;
input_1[2] = 2;
input_1[3] = 3;
input_1[4] = 4;
input_2[0] = 1;
input_2[1] = 2;
input_2[2] = 3;
input_2[3] = 4;
input_2[4] = 5;
std::shared_ptr<GPUUnitStream> inp_1(new GPUUnitStream(input_1, 5));
std::shared_ptr<GPUUnitStream> inp_2(new GPUUnitStream(input_2, 5));
printf("made inputs\n");
inp_1->copy_to_device();
inp_2->copy_to_device();
std::shared_ptr<GPUIntStream> res_1 = count(inp_1);
std::shared_ptr<GPUIntStream> res_2 = count(inp_2);
res_1->host_offset = (int*)malloc(sizeof(int));
res_1->host_timestamp = (int*)malloc(6*sizeof(int));
res_1->host_values = (int*)malloc(6*sizeof(int));
res_1->size = 6;
res_2->host_offset = (int*)malloc(sizeof(int));
res_2->host_timestamp = (int*)malloc(6*sizeof(int));
res_2->host_values = (int*)malloc(6*sizeof(int));
res_2->size = 6;
res_1->copy_to_host();
res_2->copy_to_host();
printf("RESULT 1:\n");
res_1->print();
printf("RESULT 2:\n");
res_2->print();
}
void test_slift(){
printf("slift test\n");
/*
0: y = 1
1: z = 2
1: y = 2
2: y = 2
3: z = 2
3: y = 2
50: y = 2
51: z = 10
*/
int sx = 5;
int sy = 3;
int *x_v = (int*)malloc(sx*sizeof(int));
int *y_v = (int*)malloc(sy*sizeof(int));
int *x_ts = (int*)malloc(sx*sizeof(int));
int *y_ts = (int*)malloc(sy*sizeof(int));
x_ts[0] = 0;
x_ts[1] = 1;
x_ts[2] = 2;
x_ts[3] = 3;
x_ts[4] = 50;
x_v[0] = 1;
x_v[1] = 2;
x_v[2] = 2;
x_v[3] = 2;
x_v[4] = 2;
y_ts[0] = 1;
y_ts[1] = 3;
y_ts[2] = 51;
y_v[0] = 2;
y_v[1] = 2;
y_v[2] = 10;
//int *res_ts = (int*)malloc((sx+sy)*sizeof(int));
//int *res_v = (int*)malloc((sx+sy)*sizeof(int));
/*for (int i=0; i<sx; i++){
x_ts[i] = i;
x_v[i] = i;
}
for (int i=0; i<sy; i++){
y_ts[i] = i;
y_v[i] = i;
}*/
std::shared_ptr<GPUIntStream> x(new GPUIntStream(x_ts, x_v, sx));
std::shared_ptr<GPUIntStream> y(new GPUIntStream(y_ts, y_v, sy));
x->copy_to_device();
y->copy_to_device();
std::shared_ptr<GPUIntStream> res = slift(x,y, MRG);
res->host_offset = (int*)malloc(sizeof(int));
res->host_timestamp = (int*)malloc(res->size*sizeof(int));
res->host_values = (int*)malloc(res->size*sizeof(int));
res->copy_to_host();
//x->print();
//y->print();
res->print();
}
int main(int argc, char **argv) {
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
test_slift();
//test_count();
return(0);
} | //
// Created by fabian on 28.05.21.
//
#ifndef TESSLA_CUDA_MAIN_CUH
#define TESSLA_CUDA_MAIN_CUH
//check for cuda sucess
#endif //TESSLA_CUDA_MAIN_CUH |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //
// Created by fabian on 28.05.21.
//
#ifndef TESSLA_CUDA_MAIN_CUH
#define TESSLA_CUDA_MAIN_CUH
//check for cuda sucess
#endif //TESSLA_CUDA_MAIN_CUH | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | //
// Created by fabian on 28.05.21.
//
#ifndef TESSLA_CUDA_MAIN_CUH
#define TESSLA_CUDA_MAIN_CUH
//check for cuda sucess
#endif //TESSLA_CUDA_MAIN_CUH | .text
.file "main.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <cuda.h>
__global__ void run(int *a, int *b, int N) {
int idx = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y + threadIdx.x + threadIdx.y * blockDim.y;
if (idx < N) {
a[idx] = threadIdx.x;
b[idx] = blockIdx.x;
}
}
int main() {
int *a, *b, N = 30;
cudaMalloc((void **)&a, N * sizeof(int));
cudaMalloc((void **)&b, N * sizeof(int));
dim3 gridSize(2, 2);
dim3 blockSize(2, 2);
// run<<<gridSize, blockSize>>>(a, b, N);
run<<<2, 4>>>(a, b, N);
int *hA = new int[N], *hB = new int[N];
cudaMemcpy(hA, a, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(hB, b, sizeof(int) * N, cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
printf("%d: %d[%d]\n", i, hA[i], hB[i]);
}
cudaFree(a);
cudaFree(b);
} | code for sm_80
Function : _Z3runPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2UR UR4, SR_CTAID.Y ; /* 0x00000000000479c3 */
/* 0x000e220000002600 */
/*0020*/ S2R R0, SR_TID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002200 */
/*0030*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */
/* 0x000e680000002500 */
/*0040*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e620000002100 */
/*0050*/ IADD3 R0, R0, UR4, RZ ; /* 0x0000000400007c10 */
/* 0x001fe2000fffe0ff */
/*0060*/ IMAD R3, R7, c[0x0][0x0], R6 ; /* 0x0000000007037a24 */
/* 0x002fc800078e0206 */
/*0070*/ IMAD R0, R0, c[0x0][0x4], R3 ; /* 0x0000010000007a24 */
/* 0x000fca00078e0203 */
/*0080*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f06270 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*00c0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0205 */
/*00d0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fe200078e0205 */
/*00e0*/ STG.E [R2.64], R6 ; /* 0x0000000602007986 */
/* 0x000fe8000c101904 */
/*00f0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ BRA 0x110; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <cuda.h>
__global__ void run(int *a, int *b, int N) {
int idx = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y + threadIdx.x + threadIdx.y * blockDim.y;
if (idx < N) {
a[idx] = threadIdx.x;
b[idx] = blockIdx.x;
}
}
int main() {
int *a, *b, N = 30;
cudaMalloc((void **)&a, N * sizeof(int));
cudaMalloc((void **)&b, N * sizeof(int));
dim3 gridSize(2, 2);
dim3 blockSize(2, 2);
// run<<<gridSize, blockSize>>>(a, b, N);
run<<<2, 4>>>(a, b, N);
int *hA = new int[N], *hB = new int[N];
cudaMemcpy(hA, a, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(hB, b, sizeof(int) * N, cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
printf("%d: %d[%d]\n", i, hA[i], hB[i]);
}
cudaFree(a);
cudaFree(b);
} | .file "tmpxft_0004bdfd_00000000-6_intro.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z25__device_stub__Z3runPiS_iPiS_i
.type _Z25__device_stub__Z3runPiS_iPiS_i, @function
_Z25__device_stub__Z3runPiS_iPiS_i:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3runPiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z25__device_stub__Z3runPiS_iPiS_i, .-_Z25__device_stub__Z3runPiS_iPiS_i
.globl _Z3runPiS_i
.type _Z3runPiS_i, @function
_Z3runPiS_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z3runPiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z3runPiS_i, .-_Z3runPiS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d: %d[%d]\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $56, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $120, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $120, %esi
call cudaMalloc@PLT
movl $4, 28(%rsp)
movl $1, 32(%rsp)
movl $2, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L17
.L12:
movl $120, %edi
call _Znam@PLT
movq %rax, %r12
movl $120, %edi
call _Znam@PLT
movq %rax, %rbp
movl $2, %ecx
movl $120, %edx
movq (%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movl $120, %edx
movq 8(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movl $0, %ebx
leaq .LC0(%rip), %r13
.L13:
movl (%r12,%rbx,4), %ecx
movl 0(%rbp,%rbx,4), %r8d
movl %ebx, %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq $30, %rbx
jne .L13
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L18
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
movl $30, %edx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z25__device_stub__Z3runPiS_iPiS_i
jmp .L12
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z3runPiS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z3runPiS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <cuda.h>
__global__ void run(int *a, int *b, int N) {
int idx = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y + threadIdx.x + threadIdx.y * blockDim.y;
if (idx < N) {
a[idx] = threadIdx.x;
b[idx] = blockIdx.x;
}
}
int main() {
int *a, *b, N = 30;
cudaMalloc((void **)&a, N * sizeof(int));
cudaMalloc((void **)&b, N * sizeof(int));
dim3 gridSize(2, 2);
dim3 blockSize(2, 2);
// run<<<gridSize, blockSize>>>(a, b, N);
run<<<2, 4>>>(a, b, N);
int *hA = new int[N], *hB = new int[N];
cudaMemcpy(hA, a, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(hB, b, sizeof(int) * N, cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
printf("%d: %d[%d]\n", i, hA[i], hB[i]);
}
cudaFree(a);
cudaFree(b);
} | #include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void run(int *a, int *b, int N) {
int idx = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y + threadIdx.x + threadIdx.y * blockDim.y;
if (idx < N) {
a[idx] = threadIdx.x;
b[idx] = blockIdx.x;
}
}
int main() {
int *a, *b, N = 30;
hipMalloc((void **)&a, N * sizeof(int));
hipMalloc((void **)&b, N * sizeof(int));
dim3 gridSize(2, 2);
dim3 blockSize(2, 2);
// run<<<gridSize, blockSize>>>(a, b, N);
run<<<2, 4>>>(a, b, N);
int *hA = new int[N], *hB = new int[N];
hipMemcpy(hA, a, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(hB, b, sizeof(int) * N, hipMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
printf("%d: %d[%d]\n", i, hA[i], hB[i]);
}
hipFree(a);
hipFree(b);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void run(int *a, int *b, int N) {
int idx = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y + threadIdx.x + threadIdx.y * blockDim.y;
if (idx < N) {
a[idx] = threadIdx.x;
b[idx] = blockIdx.x;
}
}
int main() {
int *a, *b, N = 30;
hipMalloc((void **)&a, N * sizeof(int));
hipMalloc((void **)&b, N * sizeof(int));
dim3 gridSize(2, 2);
dim3 blockSize(2, 2);
// run<<<gridSize, blockSize>>>(a, b, N);
run<<<2, 4>>>(a, b, N);
int *hA = new int[N], *hB = new int[N];
hipMemcpy(hA, a, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(hB, b, sizeof(int) * N, hipMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
printf("%d: %d[%d]\n", i, hA[i], hB[i]);
}
hipFree(a);
hipFree(b);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3runPiS_i
.globl _Z3runPiS_i
.p2align 8
.type _Z3runPiS_i,@function
_Z3runPiS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v2, 0x3ff, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, s15, v1
s_waitcnt lgkmcnt(0)
s_lshr_b32 s4, s2, 16
s_and_b32 s2, s2, 0xffff
v_mul_lo_u32 v1, v1, s4
s_mul_i32 s2, s14, s2
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_add3_u32 v0, s2, v2, v1
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s3, v0
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v1, 31, v0
v_mov_b32_e32 v5, s14
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_store_b32 v[3:4], v2, off
global_store_b32 v[0:1], v5, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3runPiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3runPiS_i, .Lfunc_end0-_Z3runPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3runPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3runPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void run(int *a, int *b, int N) {
int idx = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y + threadIdx.x + threadIdx.y * blockDim.y;
if (idx < N) {
a[idx] = threadIdx.x;
b[idx] = blockIdx.x;
}
}
int main() {
int *a, *b, N = 30;
hipMalloc((void **)&a, N * sizeof(int));
hipMalloc((void **)&b, N * sizeof(int));
dim3 gridSize(2, 2);
dim3 blockSize(2, 2);
// run<<<gridSize, blockSize>>>(a, b, N);
run<<<2, 4>>>(a, b, N);
int *hA = new int[N], *hB = new int[N];
hipMemcpy(hA, a, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(hB, b, sizeof(int) * N, hipMemcpyDeviceToHost);
for (int i = 0; i < N; i++) {
printf("%d: %d[%d]\n", i, hA[i], hB[i]);
}
hipFree(a);
hipFree(b);
} | .text
.file "intro.hip"
.globl _Z18__device_stub__runPiS_i # -- Begin function _Z18__device_stub__runPiS_i
.p2align 4, 0x90
.type _Z18__device_stub__runPiS_i,@function
_Z18__device_stub__runPiS_i: # @_Z18__device_stub__runPiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3runPiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__runPiS_i, .Lfunc_end0-_Z18__device_stub__runPiS_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $128, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 16(%rsp), %rdi
movl $120, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $120, %esi
callq hipMalloc
movabsq $4294967298, %rdi # imm = 0x100000002
leaq 2(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movl $30, 28(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 28(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z3runPiS_i, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movl $120, %edi
callq _Znam
movq %rax, %rbx
movl $120, %edi
callq _Znam
movq %rax, %r14
movq 16(%rsp), %rsi
movl $120, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rsi
movl $120, %edx
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_3: # =>This Inner Loop Header: Depth=1
movl (%rbx,%r15,4), %edx
movl (%r14,%r15,4), %ecx
movl $.L.str, %edi
movl %r15d, %esi
xorl %eax, %eax
callq printf
incq %r15
cmpq $30, %r15
jne .LBB1_3
# %bb.4:
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $128, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3runPiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3runPiS_i,@object # @_Z3runPiS_i
.section .rodata,"a",@progbits
.globl _Z3runPiS_i
.p2align 3, 0x0
_Z3runPiS_i:
.quad _Z18__device_stub__runPiS_i
.size _Z3runPiS_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d: %d[%d]\n"
.size .L.str, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3runPiS_i"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__runPiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3runPiS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3runPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2UR UR4, SR_CTAID.Y ; /* 0x00000000000479c3 */
/* 0x000e220000002600 */
/*0020*/ S2R R0, SR_TID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002200 */
/*0030*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */
/* 0x000e680000002500 */
/*0040*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e620000002100 */
/*0050*/ IADD3 R0, R0, UR4, RZ ; /* 0x0000000400007c10 */
/* 0x001fe2000fffe0ff */
/*0060*/ IMAD R3, R7, c[0x0][0x0], R6 ; /* 0x0000000007037a24 */
/* 0x002fc800078e0206 */
/*0070*/ IMAD R0, R0, c[0x0][0x4], R3 ; /* 0x0000010000007a24 */
/* 0x000fca00078e0203 */
/*0080*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f06270 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*00c0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc800078e0205 */
/*00d0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fe200078e0205 */
/*00e0*/ STG.E [R2.64], R6 ; /* 0x0000000602007986 */
/* 0x000fe8000c101904 */
/*00f0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ BRA 0x110; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3runPiS_i
.globl _Z3runPiS_i
.p2align 8
.type _Z3runPiS_i,@function
_Z3runPiS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v2, 0x3ff, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, s15, v1
s_waitcnt lgkmcnt(0)
s_lshr_b32 s4, s2, 16
s_and_b32 s2, s2, 0xffff
v_mul_lo_u32 v1, v1, s4
s_mul_i32 s2, s14, s2
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_add3_u32 v0, s2, v2, v1
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s3, v0
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v1, 31, v0
v_mov_b32_e32 v5, s14
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_store_b32 v[3:4], v2, off
global_store_b32 v[0:1], v5, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3runPiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3runPiS_i, .Lfunc_end0-_Z3runPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3runPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3runPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0004bdfd_00000000-6_intro.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z25__device_stub__Z3runPiS_iPiS_i
.type _Z25__device_stub__Z3runPiS_iPiS_i, @function
_Z25__device_stub__Z3runPiS_iPiS_i:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3runPiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z25__device_stub__Z3runPiS_iPiS_i, .-_Z25__device_stub__Z3runPiS_iPiS_i
.globl _Z3runPiS_i
.type _Z3runPiS_i, @function
_Z3runPiS_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z3runPiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z3runPiS_i, .-_Z3runPiS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d: %d[%d]\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $56, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $120, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $120, %esi
call cudaMalloc@PLT
movl $4, 28(%rsp)
movl $1, 32(%rsp)
movl $2, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L17
.L12:
movl $120, %edi
call _Znam@PLT
movq %rax, %r12
movl $120, %edi
call _Znam@PLT
movq %rax, %rbp
movl $2, %ecx
movl $120, %edx
movq (%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movl $120, %edx
movq 8(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movl $0, %ebx
leaq .LC0(%rip), %r13
.L13:
movl (%r12,%rbx,4), %ecx
movl 0(%rbp,%rbx,4), %r8d
movl %ebx, %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq $30, %rbx
jne .L13
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L18
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
movl $30, %edx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z25__device_stub__Z3runPiS_iPiS_i
jmp .L12
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z3runPiS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z3runPiS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "intro.hip"
.globl _Z18__device_stub__runPiS_i # -- Begin function _Z18__device_stub__runPiS_i
.p2align 4, 0x90
.type _Z18__device_stub__runPiS_i,@function
_Z18__device_stub__runPiS_i: # @_Z18__device_stub__runPiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3runPiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__runPiS_i, .Lfunc_end0-_Z18__device_stub__runPiS_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $128, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 16(%rsp), %rdi
movl $120, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $120, %esi
callq hipMalloc
movabsq $4294967298, %rdi # imm = 0x100000002
leaq 2(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movl $30, 28(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 28(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z3runPiS_i, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movl $120, %edi
callq _Znam
movq %rax, %rbx
movl $120, %edi
callq _Znam
movq %rax, %r14
movq 16(%rsp), %rsi
movl $120, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rsi
movl $120, %edx
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_3: # =>This Inner Loop Header: Depth=1
movl (%rbx,%r15,4), %edx
movl (%r14,%r15,4), %ecx
movl $.L.str, %edi
movl %r15d, %esi
xorl %eax, %eax
callq printf
incq %r15
cmpq $30, %r15
jne .LBB1_3
# %bb.4:
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $128, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3runPiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3runPiS_i,@object # @_Z3runPiS_i
.section .rodata,"a",@progbits
.globl _Z3runPiS_i
.p2align 3, 0x0
_Z3runPiS_i:
.quad _Z18__device_stub__runPiS_i
.size _Z3runPiS_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d: %d[%d]\n"
.size .L.str, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3runPiS_i"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__runPiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3runPiS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
__global__ void addKernel(int* c, const int* a, const int* b, int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
c[i] = a[i] + b[i];
}
}
// Helper function for using CUDA to add vectors in parallel.
void addWithCuda(int* c, const int* a, const int* b, int size) {
int* dev_a = nullptr;
int* dev_b = nullptr;
int* dev_c = nullptr;
// Allocate GPU buffers for three vectors (two input, one output)
cudaMalloc((void**)&dev_c, size * sizeof(int));
cudaMalloc((void**)&dev_a, size * sizeof(int));
cudaMalloc((void**)&dev_b, size * sizeof(int));
// Copy input vectors from host memory to GPU buffers.
cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
// Launch a kernel on the GPU with one thread for each element.
// 2 is number of computational blocks and (size + 1) / 2 is a number of threads in a block
addKernel<<<2, (size + 1) / 2>>>(dev_c, dev_a, dev_b, size);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaDeviceSynchronize();
// Copy output vector from GPU buffer to host memory.
cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
}
int main(int argc, char** argv) {
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
addWithCuda(c, a, b, arraySize);
printf("{1, 2, 3, 4, 5} + {10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d}\n", c[0], c[1], c[2], c[3], c[4]);
//cudaDeviceReset();
return 0;
} | code for sm_80
Function : _Z9addKernelPiPKiS1_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x170] ; /* 0x00005c0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
__global__ void addKernel(int* c, const int* a, const int* b, int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
c[i] = a[i] + b[i];
}
}
// Helper function for using CUDA to add vectors in parallel.
void addWithCuda(int* c, const int* a, const int* b, int size) {
int* dev_a = nullptr;
int* dev_b = nullptr;
int* dev_c = nullptr;
// Allocate GPU buffers for three vectors (two input, one output)
cudaMalloc((void**)&dev_c, size * sizeof(int));
cudaMalloc((void**)&dev_a, size * sizeof(int));
cudaMalloc((void**)&dev_b, size * sizeof(int));
// Copy input vectors from host memory to GPU buffers.
cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
// Launch a kernel on the GPU with one thread for each element.
// 2 is number of computational blocks and (size + 1) / 2 is a number of threads in a block
addKernel<<<2, (size + 1) / 2>>>(dev_c, dev_a, dev_b, size);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaDeviceSynchronize();
// Copy output vector from GPU buffer to host memory.
cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
}
int main(int argc, char** argv) {
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
addWithCuda(c, a, b, arraySize);
printf("{1, 2, 3, 4, 5} + {10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d}\n", c[0], c[1], c[2], c[3], c[4]);
//cudaDeviceReset();
return 0;
} | .file "tmpxft_00022073_00000000-6_addincuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z9addKernelPiPKiS1_iPiPKiS1_i
.type _Z35__device_stub__Z9addKernelPiPKiS1_iPiPKiS1_i, @function
_Z35__device_stub__Z9addKernelPiPKiS1_iPiPKiS1_i:
.LFB2083:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9addKernelPiPKiS1_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z35__device_stub__Z9addKernelPiPKiS1_iPiPKiS1_i, .-_Z35__device_stub__Z9addKernelPiPKiS1_iPiPKiS1_i
.globl _Z9addKernelPiPKiS1_i
.type _Z9addKernelPiPKiS1_i, @function
_Z9addKernelPiPKiS1_i:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z9addKernelPiPKiS1_iPiPKiS1_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z9addKernelPiPKiS1_i, .-_Z9addKernelPiPKiS1_i
.globl _Z11addWithCudaPiPKiS1_i
.type _Z11addWithCudaPiPKiS1_i, @function
_Z11addWithCudaPiPKiS1_i:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r12
movq %rsi, %r14
movq %rdx, %r13
movl %ecx, %ebp
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movq $0, 8(%rsp)
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movslq %ecx, %rbx
salq $2, %rbx
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leal 1(%rbp), %edx
movl %edx, %eax
shrl $31, %eax
addl %edx, %eax
sarl %eax
movl %eax, 44(%rsp)
movl $1, 48(%rsp)
movl $2, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movl %ebp, %ecx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z35__device_stub__Z9addKernelPiPKiS1_iPiPKiS1_i
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z11addWithCudaPiPKiS1_i, .-_Z11addWithCudaPiPKiS1_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "{1, 2, 3, 4, 5} + {10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d}\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $1, (%rsp)
movl $2, 4(%rsp)
movl $3, 8(%rsp)
movl $4, 12(%rsp)
movl $5, 16(%rsp)
movl $10, 32(%rsp)
movl $20, 36(%rsp)
movl $30, 40(%rsp)
movl $40, 44(%rsp)
movl $50, 48(%rsp)
pxor %xmm0, %xmm0
movaps %xmm0, 64(%rsp)
movl $0, 80(%rsp)
leaq 32(%rsp), %rdx
movq %rsp, %rsi
leaq 64(%rsp), %rdi
movl $5, %ecx
call _Z11addWithCudaPiPKiS1_i
subq $8, %rsp
.cfi_def_cfa_offset 120
movl 88(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 128
movl 92(%rsp), %r9d
movl 88(%rsp), %r8d
movl 84(%rsp), %ecx
movl 80(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z9addKernelPiPKiS1_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z9addKernelPiPKiS1_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
__global__ void addKernel(int* c, const int* a, const int* b, int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
c[i] = a[i] + b[i];
}
}
// Helper function for using CUDA to add vectors in parallel.
void addWithCuda(int* c, const int* a, const int* b, int size) {
int* dev_a = nullptr;
int* dev_b = nullptr;
int* dev_c = nullptr;
// Allocate GPU buffers for three vectors (two input, one output)
cudaMalloc((void**)&dev_c, size * sizeof(int));
cudaMalloc((void**)&dev_a, size * sizeof(int));
cudaMalloc((void**)&dev_b, size * sizeof(int));
// Copy input vectors from host memory to GPU buffers.
cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
// Launch a kernel on the GPU with one thread for each element.
// 2 is number of computational blocks and (size + 1) / 2 is a number of threads in a block
addKernel<<<2, (size + 1) / 2>>>(dev_c, dev_a, dev_b, size);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaDeviceSynchronize();
// Copy output vector from GPU buffer to host memory.
cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
}
int main(int argc, char** argv) {
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
addWithCuda(c, a, b, arraySize);
printf("{1, 2, 3, 4, 5} + {10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d}\n", c[0], c[1], c[2], c[3], c[4]);
//cudaDeviceReset();
return 0;
} | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void addKernel(int* c, const int* a, const int* b, int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
c[i] = a[i] + b[i];
}
}
// Helper function for using CUDA to add vectors in parallel.
void addWithCuda(int* c, const int* a, const int* b, int size) {
int* dev_a = nullptr;
int* dev_b = nullptr;
int* dev_c = nullptr;
// Allocate GPU buffers for three vectors (two input, one output)
hipMalloc((void**)&dev_c, size * sizeof(int));
hipMalloc((void**)&dev_a, size * sizeof(int));
hipMalloc((void**)&dev_b, size * sizeof(int));
// Copy input vectors from host memory to GPU buffers.
hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
// Launch a kernel on the GPU with one thread for each element.
// 2 is number of computational blocks and (size + 1) / 2 is a number of threads in a block
addKernel<<<2, (size + 1) / 2>>>(dev_c, dev_a, dev_b, size);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
hipDeviceSynchronize();
// Copy output vector from GPU buffer to host memory.
hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
}
int main(int argc, char** argv) {
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
addWithCuda(c, a, b, arraySize);
printf("{1, 2, 3, 4, 5} + {10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d}\n", c[0], c[1], c[2], c[3], c[4]);
//cudaDeviceReset();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void addKernel(int* c, const int* a, const int* b, int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
c[i] = a[i] + b[i];
}
}
// Helper function for using CUDA to add vectors in parallel.
void addWithCuda(int* c, const int* a, const int* b, int size) {
int* dev_a = nullptr;
int* dev_b = nullptr;
int* dev_c = nullptr;
// Allocate GPU buffers for three vectors (two input, one output)
hipMalloc((void**)&dev_c, size * sizeof(int));
hipMalloc((void**)&dev_a, size * sizeof(int));
hipMalloc((void**)&dev_b, size * sizeof(int));
// Copy input vectors from host memory to GPU buffers.
hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
// Launch a kernel on the GPU with one thread for each element.
// 2 is number of computational blocks and (size + 1) / 2 is a number of threads in a block
addKernel<<<2, (size + 1) / 2>>>(dev_c, dev_a, dev_b, size);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
hipDeviceSynchronize();
// Copy output vector from GPU buffer to host memory.
hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
}
int main(int argc, char** argv) {
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
addWithCuda(c, a, b, arraySize);
printf("{1, 2, 3, 4, 5} + {10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d}\n", c[0], c[1], c[2], c[3], c[4]);
//cudaDeviceReset();
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9addKernelPiPKiS1_i
.globl _Z9addKernelPiPKiS1_i
.p2align 8
.type _Z9addKernelPiPKiS1_i,@function
_Z9addKernelPiPKiS1_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s6, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s4, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9addKernelPiPKiS1_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9addKernelPiPKiS1_i, .Lfunc_end0-_Z9addKernelPiPKiS1_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9addKernelPiPKiS1_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9addKernelPiPKiS1_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void addKernel(int* c, const int* a, const int* b, int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
c[i] = a[i] + b[i];
}
}
// Helper function for using CUDA to add vectors in parallel.
void addWithCuda(int* c, const int* a, const int* b, int size) {
int* dev_a = nullptr;
int* dev_b = nullptr;
int* dev_c = nullptr;
// Allocate GPU buffers for three vectors (two input, one output)
hipMalloc((void**)&dev_c, size * sizeof(int));
hipMalloc((void**)&dev_a, size * sizeof(int));
hipMalloc((void**)&dev_b, size * sizeof(int));
// Copy input vectors from host memory to GPU buffers.
hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
// Launch a kernel on the GPU with one thread for each element.
// 2 is number of computational blocks and (size + 1) / 2 is a number of threads in a block
addKernel<<<2, (size + 1) / 2>>>(dev_c, dev_a, dev_b, size);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
hipDeviceSynchronize();
// Copy output vector from GPU buffer to host memory.
hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
}
int main(int argc, char** argv) {
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
addWithCuda(c, a, b, arraySize);
printf("{1, 2, 3, 4, 5} + {10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d}\n", c[0], c[1], c[2], c[3], c[4]);
//cudaDeviceReset();
return 0;
} | .text
.file "addincuda.hip"
.globl _Z24__device_stub__addKernelPiPKiS1_i # -- Begin function _Z24__device_stub__addKernelPiPKiS1_i
.p2align 4, 0x90
.type _Z24__device_stub__addKernelPiPKiS1_i,@function
_Z24__device_stub__addKernelPiPKiS1_i: # @_Z24__device_stub__addKernelPiPKiS1_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9addKernelPiPKiS1_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__addKernelPiPKiS1_i, .Lfunc_end0-_Z24__device_stub__addKernelPiPKiS1_i
.cfi_endproc
# -- End function
.globl _Z11addWithCudaPiPKiS1_i # -- Begin function _Z11addWithCudaPiPKiS1_i
.p2align 4, 0x90
.type _Z11addWithCudaPiPKiS1_i,@function
_Z11addWithCudaPiPKiS1_i: # @_Z11addWithCudaPiPKiS1_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %ecx, %ebp
movq %rdx, %r15
movq %rsi, %r12
movq %rdi, %rbx
movq $0, 24(%rsp)
movq $0, 16(%rsp)
movq $0, 8(%rsp)
movslq %ecx, %r13
leaq (,%r13,4), %r14
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 24(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 24(%rsp), %rdi
movq %r12, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movq %r15, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl %r13d, %eax
incl %eax
shrl $31, %eax
leal (%rax,%r13), %edx
incl %edx
sarl %edx
movabsq $4294967296, %rdi # imm = 0x100000000
orq %rdi, %rdx
orq $2, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 8(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %ebp, 36(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z9addKernelPiPKiS1_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
movq 8(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z11addWithCudaPiPKiS1_i, .Lfunc_end1-_Z11addWithCudaPiPKiS1_i
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function main
.LCPI2_0:
.long 1 # 0x1
.long 2 # 0x2
.long 3 # 0x3
.long 4 # 0x4
.LCPI2_1:
.long 10 # 0xa
.long 20 # 0x14
.long 30 # 0x1e
.long 40 # 0x28
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movaps .LCPI2_0(%rip), %xmm0 # xmm0 = [1,2,3,4]
movaps %xmm0, 64(%rsp)
movl $5, 80(%rsp)
movaps .LCPI2_1(%rip), %xmm0 # xmm0 = [10,20,30,40]
movaps %xmm0, 32(%rsp)
movl $50, 48(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movl $0, 16(%rsp)
movq %rsp, %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
movl $5, %ecx
callq _Z11addWithCudaPiPKiS1_i
movl (%rsp), %esi
movl 4(%rsp), %edx
movl 8(%rsp), %ecx
movl 12(%rsp), %r8d
movl 16(%rsp), %r9d
movl $.L.str, %edi
xorl %eax, %eax
callq printf
xorl %eax, %eax
addq $88, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9addKernelPiPKiS1_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9addKernelPiPKiS1_i,@object # @_Z9addKernelPiPKiS1_i
.section .rodata,"a",@progbits
.globl _Z9addKernelPiPKiS1_i
.p2align 3, 0x0
_Z9addKernelPiPKiS1_i:
.quad _Z24__device_stub__addKernelPiPKiS1_i
.size _Z9addKernelPiPKiS1_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "{1, 2, 3, 4, 5} + {10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d}\n"
.size .L.str, 63
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9addKernelPiPKiS1_i"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__addKernelPiPKiS1_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9addKernelPiPKiS1_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9addKernelPiPKiS1_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x170] ; /* 0x00005c0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x160] ; /* 0x0000580006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9addKernelPiPKiS1_i
.globl _Z9addKernelPiPKiS1_i
.p2align 8
.type _Z9addKernelPiPKiS1_i,@function
_Z9addKernelPiPKiS1_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s6, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s4, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9addKernelPiPKiS1_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9addKernelPiPKiS1_i, .Lfunc_end0-_Z9addKernelPiPKiS1_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9addKernelPiPKiS1_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9addKernelPiPKiS1_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00022073_00000000-6_addincuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z9addKernelPiPKiS1_iPiPKiS1_i
.type _Z35__device_stub__Z9addKernelPiPKiS1_iPiPKiS1_i, @function
_Z35__device_stub__Z9addKernelPiPKiS1_iPiPKiS1_i:
.LFB2083:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9addKernelPiPKiS1_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z35__device_stub__Z9addKernelPiPKiS1_iPiPKiS1_i, .-_Z35__device_stub__Z9addKernelPiPKiS1_iPiPKiS1_i
.globl _Z9addKernelPiPKiS1_i
.type _Z9addKernelPiPKiS1_i, @function
_Z9addKernelPiPKiS1_i:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z9addKernelPiPKiS1_iPiPKiS1_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z9addKernelPiPKiS1_i, .-_Z9addKernelPiPKiS1_i
.globl _Z11addWithCudaPiPKiS1_i
.type _Z11addWithCudaPiPKiS1_i, @function
_Z11addWithCudaPiPKiS1_i:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r12
movq %rsi, %r14
movq %rdx, %r13
movl %ecx, %ebp
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movq $0, 8(%rsp)
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movslq %ecx, %rbx
salq $2, %rbx
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leal 1(%rbp), %edx
movl %edx, %eax
shrl $31, %eax
addl %edx, %eax
sarl %eax
movl %eax, 44(%rsp)
movl $1, 48(%rsp)
movl $2, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movl %ebp, %ecx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z35__device_stub__Z9addKernelPiPKiS1_iPiPKiS1_i
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z11addWithCudaPiPKiS1_i, .-_Z11addWithCudaPiPKiS1_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "{1, 2, 3, 4, 5} + {10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d}\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $1, (%rsp)
movl $2, 4(%rsp)
movl $3, 8(%rsp)
movl $4, 12(%rsp)
movl $5, 16(%rsp)
movl $10, 32(%rsp)
movl $20, 36(%rsp)
movl $30, 40(%rsp)
movl $40, 44(%rsp)
movl $50, 48(%rsp)
pxor %xmm0, %xmm0
movaps %xmm0, 64(%rsp)
movl $0, 80(%rsp)
leaq 32(%rsp), %rdx
movq %rsp, %rsi
leaq 64(%rsp), %rdi
movl $5, %ecx
call _Z11addWithCudaPiPKiS1_i
subq $8, %rsp
.cfi_def_cfa_offset 120
movl 88(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 128
movl 92(%rsp), %r9d
movl 88(%rsp), %r8d
movl 84(%rsp), %ecx
movl 80(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z9addKernelPiPKiS1_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z9addKernelPiPKiS1_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "addincuda.hip"
.globl _Z24__device_stub__addKernelPiPKiS1_i # -- Begin function _Z24__device_stub__addKernelPiPKiS1_i
.p2align 4, 0x90
.type _Z24__device_stub__addKernelPiPKiS1_i,@function
_Z24__device_stub__addKernelPiPKiS1_i: # @_Z24__device_stub__addKernelPiPKiS1_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9addKernelPiPKiS1_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__addKernelPiPKiS1_i, .Lfunc_end0-_Z24__device_stub__addKernelPiPKiS1_i
.cfi_endproc
# -- End function
.globl _Z11addWithCudaPiPKiS1_i # -- Begin function _Z11addWithCudaPiPKiS1_i
.p2align 4, 0x90
.type _Z11addWithCudaPiPKiS1_i,@function
_Z11addWithCudaPiPKiS1_i: # @_Z11addWithCudaPiPKiS1_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %ecx, %ebp
movq %rdx, %r15
movq %rsi, %r12
movq %rdi, %rbx
movq $0, 24(%rsp)
movq $0, 16(%rsp)
movq $0, 8(%rsp)
movslq %ecx, %r13
leaq (,%r13,4), %r14
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 24(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 24(%rsp), %rdi
movq %r12, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movq %r15, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
movl %r13d, %eax
incl %eax
shrl $31, %eax
leal (%rax,%r13), %edx
incl %edx
sarl %edx
movabsq $4294967296, %rdi # imm = 0x100000000
orq %rdi, %rdx
orq $2, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 8(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %ebp, 36(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z9addKernelPiPKiS1_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
movq 8(%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z11addWithCudaPiPKiS1_i, .Lfunc_end1-_Z11addWithCudaPiPKiS1_i
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function main
.LCPI2_0:
.long 1 # 0x1
.long 2 # 0x2
.long 3 # 0x3
.long 4 # 0x4
.LCPI2_1:
.long 10 # 0xa
.long 20 # 0x14
.long 30 # 0x1e
.long 40 # 0x28
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movaps .LCPI2_0(%rip), %xmm0 # xmm0 = [1,2,3,4]
movaps %xmm0, 64(%rsp)
movl $5, 80(%rsp)
movaps .LCPI2_1(%rip), %xmm0 # xmm0 = [10,20,30,40]
movaps %xmm0, 32(%rsp)
movl $50, 48(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, (%rsp)
movl $0, 16(%rsp)
movq %rsp, %rdi
leaq 64(%rsp), %rsi
leaq 32(%rsp), %rdx
movl $5, %ecx
callq _Z11addWithCudaPiPKiS1_i
movl (%rsp), %esi
movl 4(%rsp), %edx
movl 8(%rsp), %ecx
movl 12(%rsp), %r8d
movl 16(%rsp), %r9d
movl $.L.str, %edi
xorl %eax, %eax
callq printf
xorl %eax, %eax
addq $88, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9addKernelPiPKiS1_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9addKernelPiPKiS1_i,@object # @_Z9addKernelPiPKiS1_i
.section .rodata,"a",@progbits
.globl _Z9addKernelPiPKiS1_i
.p2align 3, 0x0
_Z9addKernelPiPKiS1_i:
.quad _Z24__device_stub__addKernelPiPKiS1_i
.size _Z9addKernelPiPKiS1_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "{1, 2, 3, 4, 5} + {10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d}\n"
.size .L.str, 63
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9addKernelPiPKiS1_i"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__addKernelPiPKiS1_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9addKernelPiPKiS1_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //pass
//--blockDim=10 --gridDim=64 --no-inline
#include "cuda.h"
__device__ void bar(int* p) {
p[threadIdx.x] = 0;
}
__global__ void foo() {
__shared__ int A[10];
int* p = A;
bar(p);
} | code for sm_80
Function : _Z3foov
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //pass
//--blockDim=10 --gridDim=64 --no-inline
#include "cuda.h"
__device__ void bar(int* p) {
p[threadIdx.x] = 0;
}
__global__ void foo() {
__shared__ int A[10];
int* p = A;
bar(p);
} | .file "tmpxft_000bd59d_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z3barPi
.type _Z3barPi, @function
_Z3barPi:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z3barPi, .-_Z3barPi
.globl _Z21__device_stub__Z3foovv
.type _Z21__device_stub__Z3foovv, @function
_Z21__device_stub__Z3foovv:
.LFB2052:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z3foov(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z21__device_stub__Z3foovv, .-_Z21__device_stub__Z3foovv
.globl _Z3foov
.type _Z3foov, @function
_Z3foov:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z21__device_stub__Z3foovv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z3foov, .-_Z3foov
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z3foov"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z3foov(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //pass
//--blockDim=10 --gridDim=64 --no-inline
#include "cuda.h"
__device__ void bar(int* p) {
p[threadIdx.x] = 0;
}
__global__ void foo() {
__shared__ int A[10];
int* p = A;
bar(p);
} | //pass
//--blockDim=10 --gridDim=64 --no-inline
#include "hip/hip_runtime.h"
__device__ void bar(int* p) {
p[threadIdx.x] = 0;
}
__global__ void foo() {
__shared__ int A[10];
int* p = A;
bar(p);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //pass
//--blockDim=10 --gridDim=64 --no-inline
#include "hip/hip_runtime.h"
__device__ void bar(int* p) {
p[threadIdx.x] = 0;
}
__global__ void foo() {
__shared__ int A[10];
int* p = A;
bar(p);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3foov
.globl _Z3foov
.p2align 8
.type _Z3foov,@function
_Z3foov:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3foov
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3foov, .Lfunc_end0-_Z3foov
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3foov
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z3foov.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | //pass
//--blockDim=10 --gridDim=64 --no-inline
#include "hip/hip_runtime.h"
__device__ void bar(int* p) {
p[threadIdx.x] = 0;
}
__global__ void foo() {
__shared__ int A[10];
int* p = A;
bar(p);
} | .text
.file "kernel.hip"
.globl _Z18__device_stub__foov # -- Begin function _Z18__device_stub__foov
.p2align 4, 0x90
.type _Z18__device_stub__foov,@function
_Z18__device_stub__foov: # @_Z18__device_stub__foov
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z3foov, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z18__device_stub__foov, .Lfunc_end0-_Z18__device_stub__foov
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3foov, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3foov,@object # @_Z3foov
.section .rodata,"a",@progbits
.globl _Z3foov
.p2align 3, 0x0
_Z3foov:
.quad _Z18__device_stub__foov
.size _Z3foov, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z3foov"
.size .L__unnamed_1, 8
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__foov
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3foov
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3foov
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3foov
.globl _Z3foov
.p2align 8
.type _Z3foov,@function
_Z3foov:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3foov
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 0
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 0
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3foov, .Lfunc_end0-_Z3foov
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args: []
.group_segment_fixed_size: 0
.kernarg_segment_align: 4
.kernarg_segment_size: 0
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3foov
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z3foov.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000bd59d_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z3barPi
.type _Z3barPi, @function
_Z3barPi:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z3barPi, .-_Z3barPi
.globl _Z21__device_stub__Z3foovv
.type _Z21__device_stub__Z3foovv, @function
_Z21__device_stub__Z3foovv:
.LFB2052:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z3foov(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z21__device_stub__Z3foovv, .-_Z21__device_stub__Z3foovv
.globl _Z3foov
.type _Z3foov, @function
_Z3foov:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z21__device_stub__Z3foovv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z3foov, .-_Z3foov
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z3foov"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z3foov(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernel.hip"
.globl _Z18__device_stub__foov # -- Begin function _Z18__device_stub__foov
.p2align 4, 0x90
.type _Z18__device_stub__foov,@function
_Z18__device_stub__foov: # @_Z18__device_stub__foov
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z3foov, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z18__device_stub__foov, .Lfunc_end0-_Z18__device_stub__foov
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3foov, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3foov,@object # @_Z3foov
.section .rodata,"a",@progbits
.globl _Z3foov
.p2align 3, 0x0
_Z3foov:
.quad _Z18__device_stub__foov
.size _Z3foov, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z3foov"
.size .L__unnamed_1, 8
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__foov
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3foov
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
__global__ void vectorSum(float *a, float *b, float *c){
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = a[i] + b[i];
}
int main(int argc, char *argv[]){
unsigned int length = 4194304;
int i, Size;
float *a, *b, *c, *copyC, *gpuA, *gpuB, *gpuC;
time_t seed;
cudaEvent_t start;
cudaEvent_t stop;
float msecTotal;
cudaEventCreate(&start);
cudaEventCreate(&stop);
if (argc>1)
sscanf(argv[1],"%d",&length);
Size = sizeof(float)*length;
unsigned long int padded_length = floor((length + ((512*32)-1))/(1.0*512*32)) * (1.0*512*32);
a = (float *)calloc(length, sizeof(float));
b = (float *)calloc(length, sizeof(float));
c = (float *)calloc(length, sizeof(float));
copyC = (float *)calloc(length, sizeof(float));
time(&seed);
srand48(seed);
for (i=0; i<length; i++)
a[i] = drand48(), b[i] = drand48();
cudaSetDevice(0);
cudaError_t error;
error = cudaMalloc((void**)&gpuA, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMemset(gpuA, 0, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void**)&gpuB, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMemset(gpuB, 0, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void**)&gpuC, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMemset(gpuC, 0, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
cudaEventRecord(start, NULL);
for (i=0; i<length; i++)
c[i] = a[i] + b[i];
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
printf("cpu time: %.3f ms\n", msecTotal);
error = cudaMemcpy(gpuA, a, Size, cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(gpuB, b, Size, cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
dim3 numThreads(512, 1);
dim3 numBlocks(32, 1);
cudaEventRecord(start, NULL);
vectorSum<<<numBlocks, numThreads>>>(gpuA, gpuB, gpuC);
cudaDeviceSynchronize();
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
error = cudaMemcpy(copyC, gpuC, Size, cudaMemcpyDeviceToHost);
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
cudaEventElapsedTime(&msecTotal, start, stop);
printf("gpu time: %.3f ms\n", msecTotal);
for (i=0; i<length; i++)
if (fabs(c[i]-copyC[i]) > 0.000001){
printf("%d\t%f\t%f\n", i, c[i], copyC[i]);
return 1;
}
return 0;
} | code for sm_80
Function : _Z9vectorSumPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0050*/ IMAD R6, R3, c[0x0][0x0], R6 ; /* 0x0000000003067a24 */
/* 0x001fca00078e0206 */
/*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00b0*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
__global__ void vectorSum(float *a, float *b, float *c){
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = a[i] + b[i];
}
int main(int argc, char *argv[]){
unsigned int length = 4194304;
int i, Size;
float *a, *b, *c, *copyC, *gpuA, *gpuB, *gpuC;
time_t seed;
cudaEvent_t start;
cudaEvent_t stop;
float msecTotal;
cudaEventCreate(&start);
cudaEventCreate(&stop);
if (argc>1)
sscanf(argv[1],"%d",&length);
Size = sizeof(float)*length;
unsigned long int padded_length = floor((length + ((512*32)-1))/(1.0*512*32)) * (1.0*512*32);
a = (float *)calloc(length, sizeof(float));
b = (float *)calloc(length, sizeof(float));
c = (float *)calloc(length, sizeof(float));
copyC = (float *)calloc(length, sizeof(float));
time(&seed);
srand48(seed);
for (i=0; i<length; i++)
a[i] = drand48(), b[i] = drand48();
cudaSetDevice(0);
cudaError_t error;
error = cudaMalloc((void**)&gpuA, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMemset(gpuA, 0, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void**)&gpuB, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMemset(gpuB, 0, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void**)&gpuC, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMemset(gpuC, 0, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
cudaEventRecord(start, NULL);
for (i=0; i<length; i++)
c[i] = a[i] + b[i];
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
printf("cpu time: %.3f ms\n", msecTotal);
error = cudaMemcpy(gpuA, a, Size, cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(gpuB, b, Size, cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
dim3 numThreads(512, 1);
dim3 numBlocks(32, 1);
cudaEventRecord(start, NULL);
vectorSum<<<numBlocks, numThreads>>>(gpuA, gpuB, gpuC);
cudaDeviceSynchronize();
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
error = cudaMemcpy(copyC, gpuC, Size, cudaMemcpyDeviceToHost);
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
cudaEventElapsedTime(&msecTotal, start, stop);
printf("gpu time: %.3f ms\n", msecTotal);
for (i=0; i<length; i++)
if (fabs(c[i]-copyC[i]) > 0.000001){
printf("%d\t%f\t%f\n", i, c[i], copyC[i]);
return 1;
}
return 0;
} | .file "tmpxft_00195aca_00000000-6_VectorSum.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z9vectorSumPfS_S_PfS_S_
.type _Z32__device_stub__Z9vectorSumPfS_S_PfS_S_, @function
_Z32__device_stub__Z9vectorSumPfS_S_PfS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9vectorSumPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z32__device_stub__Z9vectorSumPfS_S_PfS_S_, .-_Z32__device_stub__Z9vectorSumPfS_S_PfS_S_
.globl _Z9vectorSumPfS_S_
.type _Z9vectorSumPfS_S_, @function
_Z9vectorSumPfS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9vectorSumPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9vectorSumPfS_S_, .-_Z9vectorSumPfS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d"
.LC6:
.string "oops, %d, error: %d\n"
.LC7:
.string "cpu time: %.3f ms\n"
.LC8:
.string "gpu time: %.3f ms\n"
.LC11:
.string "%d\t%f\t%f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movl %edi, %ebx
movq %rsi, %rbp
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
movl $4194304, 24(%rsp)
leaq 64(%rsp), %rdi
call cudaEventCreate@PLT
leaq 72(%rsp), %rdi
call cudaEventCreate@PLT
cmpl $1, %ebx
jg .L44
.L12:
movl 24(%rsp), %ebx
leal 0(,%rbx,4), %eax
movl %eax, 12(%rsp)
leal 16383(%rbx), %eax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
mulsd .LC1(%rip), %xmm1
movapd %xmm1, %xmm0
movsd .LC12(%rip), %xmm2
movapd %xmm1, %xmm3
andpd %xmm2, %xmm3
movsd .LC2(%rip), %xmm4
ucomisd %xmm3, %xmm4
jbe .L15
cvttsd2siq %xmm1, %rax
pxor %xmm3, %xmm3
cvtsi2sdq %rax, %xmm3
andnpd %xmm1, %xmm2
orpd %xmm2, %xmm3
movapd %xmm3, %xmm0
.L15:
mulsd .LC4(%rip), %xmm0
comisd .LC5(%rip), %xmm0
jnb .L16
cvttsd2siq %xmm0, %r15
.L17:
movl %ebx, %ebx
movl $4, %esi
movq %rbx, %rdi
call calloc@PLT
movq %rax, %r12
movl $4, %esi
movq %rbx, %rdi
call calloc@PLT
movq %rax, %rbp
movl $4, %esi
movq %rbx, %rdi
call calloc@PLT
movq %rax, %r13
movl $4, %esi
movq %rbx, %rdi
call calloc@PLT
movq %rax, %r14
leaq 56(%rsp), %rdi
call time@PLT
movq 56(%rsp), %rdi
call srand48@PLT
cmpl $0, 24(%rsp)
je .L18
movl $0, %ebx
.L19:
call drand48@PLT
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%r12,%rbx,4)
call drand48@PLT
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 0(%rbp,%rbx,4)
addq $1, %rbx
cmpl 24(%rsp), %ebx
jb .L19
.L18:
movl $0, %edi
call cudaSetDevice@PLT
salq $2, %r15
leaq 32(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L45
movq %r15, %rdx
movl $0, %esi
movq 32(%rsp), %rdi
call cudaMemset@PLT
testl %eax, %eax
jne .L46
leaq 40(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L47
movq %r15, %rdx
movl $0, %esi
movq 40(%rsp), %rdi
call cudaMemset@PLT
testl %eax, %eax
jne .L48
leaq 48(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L49
movq %r15, %rdx
movl $0, %esi
movq 48(%rsp), %rdi
call cudaMemset@PLT
testl %eax, %eax
jne .L50
movl $0, %esi
movq 64(%rsp), %rdi
call cudaEventRecord@PLT
movl 24(%rsp), %edx
testl %edx, %edx
je .L26
movl %edx, %edx
salq $2, %rdx
movl $0, %eax
.L27:
movss (%r12,%rax), %xmm0
addss 0(%rbp,%rax), %xmm0
movss %xmm0, 0(%r13,%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L27
.L26:
movl $0, %esi
movq 72(%rsp), %rdi
call cudaEventRecord@PLT
movq 72(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 28(%rsp), %rdi
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 28(%rsp), %xmm0
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movslq 12(%rsp), %rbx
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L51
movl $1, %ecx
movq %rbx, %rdx
movq %rbp, %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L52
movl $512, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $32, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $0, %esi
movq 64(%rsp), %rdi
call cudaEventRecord@PLT
movl 88(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 80(%rsp), %rdx
movq 92(%rsp), %rdi
movl 100(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L53
.L30:
call cudaDeviceSynchronize@PLT
movl $0, %esi
movq 72(%rsp), %rdi
call cudaEventRecord@PLT
movq 72(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 48(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L54
leaq 28(%rsp), %rdi
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 28(%rsp), %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl 24(%rsp), %eax
testl %eax, %eax
je .L37
movl %eax, %eax
movl $0, %edx
movss .LC9(%rip), %xmm4
movsd .LC10(%rip), %xmm3
.L35:
movss 0(%r13,%rdx,4), %xmm0
movss (%r14,%rdx,4), %xmm1
movaps %xmm0, %xmm2
subss %xmm1, %xmm2
andps %xmm4, %xmm2
cvtss2sd %xmm2, %xmm2
comisd %xmm3, %xmm2
ja .L55
addq $1, %rdx
cmpq %rax, %rdx
jne .L35
movl $0, %eax
jmp .L11
.L44:
leaq 24(%rsp), %rdx
movq 8(%rbp), %rdi
leaq .LC0(%rip), %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
jmp .L12
.L16:
subsd .LC5(%rip), %xmm0
cvttsd2siq %xmm0, %r15
btcq $63, %r15
jmp .L17
.L45:
movl %eax, %ecx
movl $38, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L46:
movl %eax, %ecx
movl $43, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L47:
movl %eax, %ecx
movl $48, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L48:
movl %eax, %ecx
movl $53, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L49:
movl %eax, %ecx
movl $58, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L50:
movl %eax, %ecx
movl $63, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L51:
movl %eax, %ecx
movl $76, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L52:
movl %eax, %ecx
movl $82, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L53:
movq 48(%rsp), %rdx
movq 40(%rsp), %rsi
movq 32(%rsp), %rdi
call _Z32__device_stub__Z9vectorSumPfS_S_PfS_S_
jmp .L30
.L54:
movl %eax, %ecx
movl $95, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L55:
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
movl $1, %eax
.L11:
movq 104(%rsp), %rdx
subq %fs:40, %rdx
jne .L56
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L37:
.cfi_restore_state
movl $0, %eax
jmp .L11
.L56:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC13:
.string "_Z9vectorSumPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _Z9vectorSumPfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1058013184
.align 8
.LC2:
.long 0
.long 1127219200
.align 8
.LC4:
.long 0
.long 1087373312
.align 8
.LC5:
.long 0
.long 1138753536
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC9:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC10:
.long -1598689907
.long 1051772663
.align 8
.LC12:
.long -1
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
__global__ void vectorSum(float *a, float *b, float *c){
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = a[i] + b[i];
}
int main(int argc, char *argv[]){
unsigned int length = 4194304;
int i, Size;
float *a, *b, *c, *copyC, *gpuA, *gpuB, *gpuC;
time_t seed;
cudaEvent_t start;
cudaEvent_t stop;
float msecTotal;
cudaEventCreate(&start);
cudaEventCreate(&stop);
if (argc>1)
sscanf(argv[1],"%d",&length);
Size = sizeof(float)*length;
unsigned long int padded_length = floor((length + ((512*32)-1))/(1.0*512*32)) * (1.0*512*32);
a = (float *)calloc(length, sizeof(float));
b = (float *)calloc(length, sizeof(float));
c = (float *)calloc(length, sizeof(float));
copyC = (float *)calloc(length, sizeof(float));
time(&seed);
srand48(seed);
for (i=0; i<length; i++)
a[i] = drand48(), b[i] = drand48();
cudaSetDevice(0);
cudaError_t error;
error = cudaMalloc((void**)&gpuA, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMemset(gpuA, 0, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void**)&gpuB, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMemset(gpuB, 0, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMalloc((void**)&gpuC, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMemset(gpuC, 0, padded_length*sizeof(float));
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
cudaEventRecord(start, NULL);
for (i=0; i<length; i++)
c[i] = a[i] + b[i];
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&msecTotal, start, stop);
printf("cpu time: %.3f ms\n", msecTotal);
error = cudaMemcpy(gpuA, a, Size, cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(gpuB, b, Size, cudaMemcpyHostToDevice);
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
dim3 numThreads(512, 1);
dim3 numBlocks(32, 1);
cudaEventRecord(start, NULL);
vectorSum<<<numBlocks, numThreads>>>(gpuA, gpuB, gpuC);
cudaDeviceSynchronize();
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
error = cudaMemcpy(copyC, gpuC, Size, cudaMemcpyDeviceToHost);
if (error != cudaSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
cudaEventElapsedTime(&msecTotal, start, stop);
printf("gpu time: %.3f ms\n", msecTotal);
for (i=0; i<length; i++)
if (fabs(c[i]-copyC[i]) > 0.000001){
printf("%d\t%f\t%f\n", i, c[i], copyC[i]);
return 1;
}
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
__global__ void vectorSum(float *a, float *b, float *c){
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = a[i] + b[i];
}
int main(int argc, char *argv[]){
unsigned int length = 4194304;
int i, Size;
float *a, *b, *c, *copyC, *gpuA, *gpuB, *gpuC;
time_t seed;
hipEvent_t start;
hipEvent_t stop;
float msecTotal;
hipEventCreate(&start);
hipEventCreate(&stop);
if (argc>1)
sscanf(argv[1],"%d",&length);
Size = sizeof(float)*length;
unsigned long int padded_length = floor((length + ((512*32)-1))/(1.0*512*32)) * (1.0*512*32);
a = (float *)calloc(length, sizeof(float));
b = (float *)calloc(length, sizeof(float));
c = (float *)calloc(length, sizeof(float));
copyC = (float *)calloc(length, sizeof(float));
time(&seed);
srand48(seed);
for (i=0; i<length; i++)
a[i] = drand48(), b[i] = drand48();
hipSetDevice(0);
hipError_t error;
error = hipMalloc((void**)&gpuA, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMemset(gpuA, 0, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMalloc((void**)&gpuB, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMemset(gpuB, 0, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMalloc((void**)&gpuC, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMemset(gpuC, 0, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
hipEventRecord(start, NULL);
for (i=0; i<length; i++)
c[i] = a[i] + b[i];
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("cpu time: %.3f ms\n", msecTotal);
error = hipMemcpy(gpuA, a, Size, hipMemcpyHostToDevice);
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMemcpy(gpuB, b, Size, hipMemcpyHostToDevice);
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
dim3 numThreads(512, 1);
dim3 numBlocks(32, 1);
hipEventRecord(start, NULL);
vectorSum<<<numBlocks, numThreads>>>(gpuA, gpuB, gpuC);
hipDeviceSynchronize();
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
error = hipMemcpy(copyC, gpuC, Size, hipMemcpyDeviceToHost);
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
hipEventElapsedTime(&msecTotal, start, stop);
printf("gpu time: %.3f ms\n", msecTotal);
for (i=0; i<length; i++)
if (fabs(c[i]-copyC[i]) > 0.000001){
printf("%d\t%f\t%f\n", i, c[i], copyC[i]);
return 1;
}
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
__global__ void vectorSum(float *a, float *b, float *c){
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = a[i] + b[i];
}
int main(int argc, char *argv[]){
unsigned int length = 4194304;
int i, Size;
float *a, *b, *c, *copyC, *gpuA, *gpuB, *gpuC;
time_t seed;
hipEvent_t start;
hipEvent_t stop;
float msecTotal;
hipEventCreate(&start);
hipEventCreate(&stop);
if (argc>1)
sscanf(argv[1],"%d",&length);
Size = sizeof(float)*length;
unsigned long int padded_length = floor((length + ((512*32)-1))/(1.0*512*32)) * (1.0*512*32);
a = (float *)calloc(length, sizeof(float));
b = (float *)calloc(length, sizeof(float));
c = (float *)calloc(length, sizeof(float));
copyC = (float *)calloc(length, sizeof(float));
time(&seed);
srand48(seed);
for (i=0; i<length; i++)
a[i] = drand48(), b[i] = drand48();
hipSetDevice(0);
hipError_t error;
error = hipMalloc((void**)&gpuA, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMemset(gpuA, 0, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMalloc((void**)&gpuB, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMemset(gpuB, 0, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMalloc((void**)&gpuC, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMemset(gpuC, 0, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
hipEventRecord(start, NULL);
for (i=0; i<length; i++)
c[i] = a[i] + b[i];
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("cpu time: %.3f ms\n", msecTotal);
error = hipMemcpy(gpuA, a, Size, hipMemcpyHostToDevice);
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMemcpy(gpuB, b, Size, hipMemcpyHostToDevice);
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
dim3 numThreads(512, 1);
dim3 numBlocks(32, 1);
hipEventRecord(start, NULL);
vectorSum<<<numBlocks, numThreads>>>(gpuA, gpuB, gpuC);
hipDeviceSynchronize();
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
error = hipMemcpy(copyC, gpuC, Size, hipMemcpyDeviceToHost);
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
hipEventElapsedTime(&msecTotal, start, stop);
printf("gpu time: %.3f ms\n", msecTotal);
for (i=0; i<length; i++)
if (fabs(c[i]-copyC[i]) > 0.000001){
printf("%d\t%f\t%f\n", i, c[i], copyC[i]);
return 1;
}
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9vectorSumPfS_S_
.globl _Z9vectorSumPfS_S_
.p2align 8
.type _Z9vectorSumPfS_S_,@function
_Z9vectorSumPfS_S_:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9vectorSumPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9vectorSumPfS_S_, .Lfunc_end0-_Z9vectorSumPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9vectorSumPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9vectorSumPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
__global__ void vectorSum(float *a, float *b, float *c){
int i = threadIdx.x + blockIdx.x * blockDim.x;
c[i] = a[i] + b[i];
}
int main(int argc, char *argv[]){
unsigned int length = 4194304;
int i, Size;
float *a, *b, *c, *copyC, *gpuA, *gpuB, *gpuC;
time_t seed;
hipEvent_t start;
hipEvent_t stop;
float msecTotal;
hipEventCreate(&start);
hipEventCreate(&stop);
if (argc>1)
sscanf(argv[1],"%d",&length);
Size = sizeof(float)*length;
unsigned long int padded_length = floor((length + ((512*32)-1))/(1.0*512*32)) * (1.0*512*32);
a = (float *)calloc(length, sizeof(float));
b = (float *)calloc(length, sizeof(float));
c = (float *)calloc(length, sizeof(float));
copyC = (float *)calloc(length, sizeof(float));
time(&seed);
srand48(seed);
for (i=0; i<length; i++)
a[i] = drand48(), b[i] = drand48();
hipSetDevice(0);
hipError_t error;
error = hipMalloc((void**)&gpuA, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMemset(gpuA, 0, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMalloc((void**)&gpuB, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMemset(gpuB, 0, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMalloc((void**)&gpuC, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMemset(gpuC, 0, padded_length*sizeof(float));
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
hipEventRecord(start, NULL);
for (i=0; i<length; i++)
c[i] = a[i] + b[i];
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
hipEventElapsedTime(&msecTotal, start, stop);
printf("cpu time: %.3f ms\n", msecTotal);
error = hipMemcpy(gpuA, a, Size, hipMemcpyHostToDevice);
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
error = hipMemcpy(gpuB, b, Size, hipMemcpyHostToDevice);
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
dim3 numThreads(512, 1);
dim3 numBlocks(32, 1);
hipEventRecord(start, NULL);
vectorSum<<<numBlocks, numThreads>>>(gpuA, gpuB, gpuC);
hipDeviceSynchronize();
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
error = hipMemcpy(copyC, gpuC, Size, hipMemcpyDeviceToHost);
if (error != hipSuccess) {
printf("oops, %d, error: %d\n", __LINE__, error);
exit(EXIT_FAILURE);
}
hipEventElapsedTime(&msecTotal, start, stop);
printf("gpu time: %.3f ms\n", msecTotal);
for (i=0; i<length; i++)
if (fabs(c[i]-copyC[i]) > 0.000001){
printf("%d\t%f\t%f\n", i, c[i], copyC[i]);
return 1;
}
return 0;
} | .text
.file "VectorSum.hip"
.globl _Z24__device_stub__vectorSumPfS_S_ # -- Begin function _Z24__device_stub__vectorSumPfS_S_
.p2align 4, 0x90
.type _Z24__device_stub__vectorSumPfS_S_,@function
_Z24__device_stub__vectorSumPfS_S_: # @_Z24__device_stub__vectorSumPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9vectorSumPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z24__device_stub__vectorSumPfS_S_, .Lfunc_end0-_Z24__device_stub__vectorSumPfS_S_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x3f10000000000000 # double 6.103515625E-5
.LCPI1_1:
.quad 0x40d0000000000000 # double 16384
.LCPI1_2:
.quad 0x43e0000000000000 # double 9.2233720368547758E+18
.LCPI1_4:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI1_3:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $168, %rsp
.cfi_def_cfa_offset 224
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movl %edi, %ebp
movl $4194304, 8(%rsp) # imm = 0x400000
leaq 24(%rsp), %rdi
callq hipEventCreate
leaq 16(%rsp), %rdi
callq hipEventCreate
cmpl $2, %ebp
jl .LBB1_2
# %bb.1:
movq 8(%rbx), %rdi
leaq 8(%rsp), %rdx
movl $.L.str, %esi
xorl %eax, %eax
callq __isoc23_sscanf
.LBB1_2:
movl 8(%rsp), %r13d
leal 16383(%r13), %eax
cvtsi2sd %rax, %xmm0
mulsd .LCPI1_0(%rip), %xmm0
callq floor@PLT
mulsd .LCPI1_1(%rip), %xmm0
cvttsd2si %xmm0, %rax
movq %rax, %rcx
sarq $63, %rcx
subsd .LCPI1_2(%rip), %xmm0
cvttsd2si %xmm0, %rbp
andq %rcx, %rbp
orq %rax, %rbp
movl $4, %esi
movq %r13, %rdi
callq calloc
movq %rax, %r12
movl $4, %esi
movq %r13, %rdi
callq calloc
movq %rax, %r15
movl $4, %esi
movq %r13, %rdi
callq calloc
movq %rax, %rbx
movl $4, %esi
movq %r13, %rdi
callq calloc
movq %rax, 56(%rsp) # 8-byte Spill
leaq 64(%rsp), %rdi
callq time
movq 64(%rsp), %rdi
callq srand48
cmpl $0, 8(%rsp)
je .LBB1_5
# %bb.3: # %.lr.ph.preheader
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_4: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq drand48
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%r12,%r14,4)
callq drand48
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%r15,%r14,4)
incq %r14
movl 8(%rsp), %eax
cmpq %rax, %r14
jb .LBB1_4
.LBB1_5: # %._crit_edge
xorl %edi, %edi
callq hipSetDevice
shlq $2, %rbp
leaq 48(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB1_6
# %bb.8:
movq 48(%rsp), %rdi
xorl %esi, %esi
movq %rbp, %rdx
callq hipMemset
testl %eax, %eax
jne .LBB1_9
# %bb.10:
leaq 40(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB1_11
# %bb.12:
movq 40(%rsp), %rdi
xorl %esi, %esi
movq %rbp, %rdx
callq hipMemset
testl %eax, %eax
jne .LBB1_13
# %bb.14:
leaq 32(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB1_15
# %bb.16:
movq 32(%rsp), %rdi
xorl %esi, %esi
movq %rbp, %rdx
callq hipMemset
testl %eax, %eax
jne .LBB1_17
# %bb.18:
leal (,%r13,4), %ebp
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movl 8(%rsp), %eax
testq %rax, %rax
je .LBB1_21
# %bb.19: # %.lr.ph89.preheader
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_20: # %.lr.ph89
# =>This Inner Loop Header: Depth=1
movss (%r12,%rcx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%r15,%rcx,4), %xmm0
movss %xmm0, (%rbx,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB1_20
.LBB1_21: # %._crit_edge90
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 16(%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movq 16(%rsp), %rdx
leaq 12(%rsp), %rdi
callq hipEventElapsedTime
movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
movq 48(%rsp), %rdi
movslq %ebp, %r13
movq %r12, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_22
# %bb.23:
movq 40(%rsp), %rdi
movq %r15, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
movq 56(%rsp), %r14 # 8-byte Reload
jne .LBB1_24
# %bb.25:
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $4294967328, %rdi # imm = 0x100000020
leaq 480(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_27
# %bb.26:
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
movq %rdx, 120(%rsp)
leaq 136(%rsp), %rax
movq %rax, 144(%rsp)
leaq 128(%rsp), %rax
movq %rax, 152(%rsp)
leaq 120(%rsp), %rax
movq %rax, 160(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z9vectorSumPfS_S_, %edi
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_27:
callq hipDeviceSynchronize
movq 16(%rsp), %rdi
xorl %r15d, %r15d
xorl %esi, %esi
callq hipEventRecord
movq 16(%rsp), %rdi
callq hipEventSynchronize
movq 32(%rsp), %rsi
movq %r14, %rdi
movq %r13, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_28
# %bb.29:
movq 24(%rsp), %rsi
movq 16(%rsp), %rdx
leaq 12(%rsp), %rdi
callq hipEventElapsedTime
movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.3, %edi
movb $1, %al
callq printf
movl 8(%rsp), %eax
testq %rax, %rax
je .LBB1_35
# %bb.30: # %.lr.ph93.preheader
xorl %esi, %esi
movaps .LCPI1_3(%rip), %xmm0 # xmm0 = [NaN,NaN,NaN,NaN]
movsd .LCPI1_4(%rip), %xmm1 # xmm1 = mem[0],zero
.p2align 4, 0x90
.LBB1_31: # %.lr.ph93
# =>This Inner Loop Header: Depth=1
movss (%rbx,%rsi,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
movss (%r14,%rsi,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
movaps %xmm2, %xmm4
subss %xmm3, %xmm4
andps %xmm0, %xmm4
cvtss2sd %xmm4, %xmm4
ucomisd %xmm1, %xmm4
ja .LBB1_32
# %bb.33: # in Loop: Header=BB1_31 Depth=1
incq %rsi
cmpq %rsi, %rax
jne .LBB1_31
# %bb.34:
xorl %r15d, %r15d
jmp .LBB1_35
.LBB1_32:
xorps %xmm0, %xmm0
cvtss2sd %xmm2, %xmm0
xorps %xmm1, %xmm1
cvtss2sd %xmm3, %xmm1
movl $.L.str.4, %edi
# kill: def $esi killed $esi killed $rsi
movb $2, %al
callq printf
movl $1, %r15d
.LBB1_35: # %.loopexit
movl %r15d, %eax
addq $168, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_6:
.cfi_def_cfa_offset 224
movl $.L.str.1, %edi
movl $40, %esi
jmp .LBB1_7
.LBB1_9:
movl $.L.str.1, %edi
movl $45, %esi
jmp .LBB1_7
.LBB1_11:
movl $.L.str.1, %edi
movl $50, %esi
jmp .LBB1_7
.LBB1_13:
movl $.L.str.1, %edi
movl $55, %esi
jmp .LBB1_7
.LBB1_15:
movl $.L.str.1, %edi
movl $60, %esi
jmp .LBB1_7
.LBB1_17:
movl $.L.str.1, %edi
movl $65, %esi
jmp .LBB1_7
.LBB1_22:
movl $.L.str.1, %edi
movl $78, %esi
jmp .LBB1_7
.LBB1_24:
movl $.L.str.1, %edi
movl $84, %esi
jmp .LBB1_7
.LBB1_28:
movl $.L.str.1, %edi
movl $97, %esi
.LBB1_7:
movl %eax, %edx
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9vectorSumPfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9vectorSumPfS_S_,@object # @_Z9vectorSumPfS_S_
.section .rodata,"a",@progbits
.globl _Z9vectorSumPfS_S_
.p2align 3, 0x0
_Z9vectorSumPfS_S_:
.quad _Z24__device_stub__vectorSumPfS_S_
.size _Z9vectorSumPfS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d"
.size .L.str, 3
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "oops, %d, error: %d\n"
.size .L.str.1, 21
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "cpu time: %.3f ms\n"
.size .L.str.2, 19
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "gpu time: %.3f ms\n"
.size .L.str.3, 19
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "%d\t%f\t%f\n"
.size .L.str.4, 10
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9vectorSumPfS_S_"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__vectorSumPfS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9vectorSumPfS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9vectorSumPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0050*/ IMAD R6, R3, c[0x0][0x0], R6 ; /* 0x0000000003067a24 */
/* 0x001fca00078e0206 */
/*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00b0*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9vectorSumPfS_S_
.globl _Z9vectorSumPfS_S_
.p2align 8
.type _Z9vectorSumPfS_S_,@function
_Z9vectorSumPfS_S_:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9vectorSumPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9vectorSumPfS_S_, .Lfunc_end0-_Z9vectorSumPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9vectorSumPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9vectorSumPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00195aca_00000000-6_VectorSum.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z9vectorSumPfS_S_PfS_S_
.type _Z32__device_stub__Z9vectorSumPfS_S_PfS_S_, @function
_Z32__device_stub__Z9vectorSumPfS_S_PfS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9vectorSumPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z32__device_stub__Z9vectorSumPfS_S_PfS_S_, .-_Z32__device_stub__Z9vectorSumPfS_S_PfS_S_
.globl _Z9vectorSumPfS_S_
.type _Z9vectorSumPfS_S_, @function
_Z9vectorSumPfS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9vectorSumPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9vectorSumPfS_S_, .-_Z9vectorSumPfS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d"
.LC6:
.string "oops, %d, error: %d\n"
.LC7:
.string "cpu time: %.3f ms\n"
.LC8:
.string "gpu time: %.3f ms\n"
.LC11:
.string "%d\t%f\t%f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movl %edi, %ebx
movq %rsi, %rbp
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
movl $4194304, 24(%rsp)
leaq 64(%rsp), %rdi
call cudaEventCreate@PLT
leaq 72(%rsp), %rdi
call cudaEventCreate@PLT
cmpl $1, %ebx
jg .L44
.L12:
movl 24(%rsp), %ebx
leal 0(,%rbx,4), %eax
movl %eax, 12(%rsp)
leal 16383(%rbx), %eax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
mulsd .LC1(%rip), %xmm1
movapd %xmm1, %xmm0
movsd .LC12(%rip), %xmm2
movapd %xmm1, %xmm3
andpd %xmm2, %xmm3
movsd .LC2(%rip), %xmm4
ucomisd %xmm3, %xmm4
jbe .L15
cvttsd2siq %xmm1, %rax
pxor %xmm3, %xmm3
cvtsi2sdq %rax, %xmm3
andnpd %xmm1, %xmm2
orpd %xmm2, %xmm3
movapd %xmm3, %xmm0
.L15:
mulsd .LC4(%rip), %xmm0
comisd .LC5(%rip), %xmm0
jnb .L16
cvttsd2siq %xmm0, %r15
.L17:
movl %ebx, %ebx
movl $4, %esi
movq %rbx, %rdi
call calloc@PLT
movq %rax, %r12
movl $4, %esi
movq %rbx, %rdi
call calloc@PLT
movq %rax, %rbp
movl $4, %esi
movq %rbx, %rdi
call calloc@PLT
movq %rax, %r13
movl $4, %esi
movq %rbx, %rdi
call calloc@PLT
movq %rax, %r14
leaq 56(%rsp), %rdi
call time@PLT
movq 56(%rsp), %rdi
call srand48@PLT
cmpl $0, 24(%rsp)
je .L18
movl $0, %ebx
.L19:
call drand48@PLT
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%r12,%rbx,4)
call drand48@PLT
cvtsd2ss %xmm0, %xmm0
movss %xmm0, 0(%rbp,%rbx,4)
addq $1, %rbx
cmpl 24(%rsp), %ebx
jb .L19
.L18:
movl $0, %edi
call cudaSetDevice@PLT
salq $2, %r15
leaq 32(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L45
movq %r15, %rdx
movl $0, %esi
movq 32(%rsp), %rdi
call cudaMemset@PLT
testl %eax, %eax
jne .L46
leaq 40(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L47
movq %r15, %rdx
movl $0, %esi
movq 40(%rsp), %rdi
call cudaMemset@PLT
testl %eax, %eax
jne .L48
leaq 48(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
testl %eax, %eax
jne .L49
movq %r15, %rdx
movl $0, %esi
movq 48(%rsp), %rdi
call cudaMemset@PLT
testl %eax, %eax
jne .L50
movl $0, %esi
movq 64(%rsp), %rdi
call cudaEventRecord@PLT
movl 24(%rsp), %edx
testl %edx, %edx
je .L26
movl %edx, %edx
salq $2, %rdx
movl $0, %eax
.L27:
movss (%r12,%rax), %xmm0
addss 0(%rbp,%rax), %xmm0
movss %xmm0, 0(%r13,%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L27
.L26:
movl $0, %esi
movq 72(%rsp), %rdi
call cudaEventRecord@PLT
movq 72(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 28(%rsp), %rdi
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 28(%rsp), %xmm0
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movslq 12(%rsp), %rbx
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L51
movl $1, %ecx
movq %rbx, %rdx
movq %rbp, %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L52
movl $512, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $32, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $0, %esi
movq 64(%rsp), %rdi
call cudaEventRecord@PLT
movl 88(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 80(%rsp), %rdx
movq 92(%rsp), %rdi
movl 100(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L53
.L30:
call cudaDeviceSynchronize@PLT
movl $0, %esi
movq 72(%rsp), %rdi
call cudaEventRecord@PLT
movq 72(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 48(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L54
leaq 28(%rsp), %rdi
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 28(%rsp), %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl 24(%rsp), %eax
testl %eax, %eax
je .L37
movl %eax, %eax
movl $0, %edx
movss .LC9(%rip), %xmm4
movsd .LC10(%rip), %xmm3
.L35:
movss 0(%r13,%rdx,4), %xmm0
movss (%r14,%rdx,4), %xmm1
movaps %xmm0, %xmm2
subss %xmm1, %xmm2
andps %xmm4, %xmm2
cvtss2sd %xmm2, %xmm2
comisd %xmm3, %xmm2
ja .L55
addq $1, %rdx
cmpq %rax, %rdx
jne .L35
movl $0, %eax
jmp .L11
.L44:
leaq 24(%rsp), %rdx
movq 8(%rbp), %rdi
leaq .LC0(%rip), %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
jmp .L12
.L16:
subsd .LC5(%rip), %xmm0
cvttsd2siq %xmm0, %r15
btcq $63, %r15
jmp .L17
.L45:
movl %eax, %ecx
movl $38, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L46:
movl %eax, %ecx
movl $43, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L47:
movl %eax, %ecx
movl $48, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L48:
movl %eax, %ecx
movl $53, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L49:
movl %eax, %ecx
movl $58, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L50:
movl %eax, %ecx
movl $63, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L51:
movl %eax, %ecx
movl $76, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L52:
movl %eax, %ecx
movl $82, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L53:
movq 48(%rsp), %rdx
movq 40(%rsp), %rsi
movq 32(%rsp), %rdi
call _Z32__device_stub__Z9vectorSumPfS_S_PfS_S_
jmp .L30
.L54:
movl %eax, %ecx
movl $95, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L55:
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
movl $1, %eax
.L11:
movq 104(%rsp), %rdx
subq %fs:40, %rdx
jne .L56
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L37:
.cfi_restore_state
movl $0, %eax
jmp .L11
.L56:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC13:
.string "_Z9vectorSumPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _Z9vectorSumPfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC1:
.long 0
.long 1058013184
.align 8
.LC2:
.long 0
.long 1127219200
.align 8
.LC4:
.long 0
.long 1087373312
.align 8
.LC5:
.long 0
.long 1138753536
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC9:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC10:
.long -1598689907
.long 1051772663
.align 8
.LC12:
.long -1
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "VectorSum.hip"
.globl _Z24__device_stub__vectorSumPfS_S_ # -- Begin function _Z24__device_stub__vectorSumPfS_S_
.p2align 4, 0x90
.type _Z24__device_stub__vectorSumPfS_S_,@function
_Z24__device_stub__vectorSumPfS_S_: # @_Z24__device_stub__vectorSumPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9vectorSumPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z24__device_stub__vectorSumPfS_S_, .Lfunc_end0-_Z24__device_stub__vectorSumPfS_S_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x3f10000000000000 # double 6.103515625E-5
.LCPI1_1:
.quad 0x40d0000000000000 # double 16384
.LCPI1_2:
.quad 0x43e0000000000000 # double 9.2233720368547758E+18
.LCPI1_4:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI1_3:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $168, %rsp
.cfi_def_cfa_offset 224
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movl %edi, %ebp
movl $4194304, 8(%rsp) # imm = 0x400000
leaq 24(%rsp), %rdi
callq hipEventCreate
leaq 16(%rsp), %rdi
callq hipEventCreate
cmpl $2, %ebp
jl .LBB1_2
# %bb.1:
movq 8(%rbx), %rdi
leaq 8(%rsp), %rdx
movl $.L.str, %esi
xorl %eax, %eax
callq __isoc23_sscanf
.LBB1_2:
movl 8(%rsp), %r13d
leal 16383(%r13), %eax
cvtsi2sd %rax, %xmm0
mulsd .LCPI1_0(%rip), %xmm0
callq floor@PLT
mulsd .LCPI1_1(%rip), %xmm0
cvttsd2si %xmm0, %rax
movq %rax, %rcx
sarq $63, %rcx
subsd .LCPI1_2(%rip), %xmm0
cvttsd2si %xmm0, %rbp
andq %rcx, %rbp
orq %rax, %rbp
movl $4, %esi
movq %r13, %rdi
callq calloc
movq %rax, %r12
movl $4, %esi
movq %r13, %rdi
callq calloc
movq %rax, %r15
movl $4, %esi
movq %r13, %rdi
callq calloc
movq %rax, %rbx
movl $4, %esi
movq %r13, %rdi
callq calloc
movq %rax, 56(%rsp) # 8-byte Spill
leaq 64(%rsp), %rdi
callq time
movq 64(%rsp), %rdi
callq srand48
cmpl $0, 8(%rsp)
je .LBB1_5
# %bb.3: # %.lr.ph.preheader
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_4: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq drand48
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%r12,%r14,4)
callq drand48
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%r15,%r14,4)
incq %r14
movl 8(%rsp), %eax
cmpq %rax, %r14
jb .LBB1_4
.LBB1_5: # %._crit_edge
xorl %edi, %edi
callq hipSetDevice
shlq $2, %rbp
leaq 48(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB1_6
# %bb.8:
movq 48(%rsp), %rdi
xorl %esi, %esi
movq %rbp, %rdx
callq hipMemset
testl %eax, %eax
jne .LBB1_9
# %bb.10:
leaq 40(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB1_11
# %bb.12:
movq 40(%rsp), %rdi
xorl %esi, %esi
movq %rbp, %rdx
callq hipMemset
testl %eax, %eax
jne .LBB1_13
# %bb.14:
leaq 32(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB1_15
# %bb.16:
movq 32(%rsp), %rdi
xorl %esi, %esi
movq %rbp, %rdx
callq hipMemset
testl %eax, %eax
jne .LBB1_17
# %bb.18:
leal (,%r13,4), %ebp
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movl 8(%rsp), %eax
testq %rax, %rax
je .LBB1_21
# %bb.19: # %.lr.ph89.preheader
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_20: # %.lr.ph89
# =>This Inner Loop Header: Depth=1
movss (%r12,%rcx,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%r15,%rcx,4), %xmm0
movss %xmm0, (%rbx,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB1_20
.LBB1_21: # %._crit_edge90
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 16(%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movq 16(%rsp), %rdx
leaq 12(%rsp), %rdi
callq hipEventElapsedTime
movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
movq 48(%rsp), %rdi
movslq %ebp, %r13
movq %r12, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_22
# %bb.23:
movq 40(%rsp), %rdi
movq %r15, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
movq 56(%rsp), %r14 # 8-byte Reload
jne .LBB1_24
# %bb.25:
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $4294967328, %rdi # imm = 0x100000020
leaq 480(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_27
# %bb.26:
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
movq %rdx, 120(%rsp)
leaq 136(%rsp), %rax
movq %rax, 144(%rsp)
leaq 128(%rsp), %rax
movq %rax, 152(%rsp)
leaq 120(%rsp), %rax
movq %rax, 160(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z9vectorSumPfS_S_, %edi
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_27:
callq hipDeviceSynchronize
movq 16(%rsp), %rdi
xorl %r15d, %r15d
xorl %esi, %esi
callq hipEventRecord
movq 16(%rsp), %rdi
callq hipEventSynchronize
movq 32(%rsp), %rsi
movq %r14, %rdi
movq %r13, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_28
# %bb.29:
movq 24(%rsp), %rsi
movq 16(%rsp), %rdx
leaq 12(%rsp), %rdi
callq hipEventElapsedTime
movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.3, %edi
movb $1, %al
callq printf
movl 8(%rsp), %eax
testq %rax, %rax
je .LBB1_35
# %bb.30: # %.lr.ph93.preheader
xorl %esi, %esi
movaps .LCPI1_3(%rip), %xmm0 # xmm0 = [NaN,NaN,NaN,NaN]
movsd .LCPI1_4(%rip), %xmm1 # xmm1 = mem[0],zero
.p2align 4, 0x90
.LBB1_31: # %.lr.ph93
# =>This Inner Loop Header: Depth=1
movss (%rbx,%rsi,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
movss (%r14,%rsi,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
movaps %xmm2, %xmm4
subss %xmm3, %xmm4
andps %xmm0, %xmm4
cvtss2sd %xmm4, %xmm4
ucomisd %xmm1, %xmm4
ja .LBB1_32
# %bb.33: # in Loop: Header=BB1_31 Depth=1
incq %rsi
cmpq %rsi, %rax
jne .LBB1_31
# %bb.34:
xorl %r15d, %r15d
jmp .LBB1_35
.LBB1_32:
xorps %xmm0, %xmm0
cvtss2sd %xmm2, %xmm0
xorps %xmm1, %xmm1
cvtss2sd %xmm3, %xmm1
movl $.L.str.4, %edi
# kill: def $esi killed $esi killed $rsi
movb $2, %al
callq printf
movl $1, %r15d
.LBB1_35: # %.loopexit
movl %r15d, %eax
addq $168, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_6:
.cfi_def_cfa_offset 224
movl $.L.str.1, %edi
movl $40, %esi
jmp .LBB1_7
.LBB1_9:
movl $.L.str.1, %edi
movl $45, %esi
jmp .LBB1_7
.LBB1_11:
movl $.L.str.1, %edi
movl $50, %esi
jmp .LBB1_7
.LBB1_13:
movl $.L.str.1, %edi
movl $55, %esi
jmp .LBB1_7
.LBB1_15:
movl $.L.str.1, %edi
movl $60, %esi
jmp .LBB1_7
.LBB1_17:
movl $.L.str.1, %edi
movl $65, %esi
jmp .LBB1_7
.LBB1_22:
movl $.L.str.1, %edi
movl $78, %esi
jmp .LBB1_7
.LBB1_24:
movl $.L.str.1, %edi
movl $84, %esi
jmp .LBB1_7
.LBB1_28:
movl $.L.str.1, %edi
movl $97, %esi
.LBB1_7:
movl %eax, %edx
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9vectorSumPfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9vectorSumPfS_S_,@object # @_Z9vectorSumPfS_S_
.section .rodata,"a",@progbits
.globl _Z9vectorSumPfS_S_
.p2align 3, 0x0
_Z9vectorSumPfS_S_:
.quad _Z24__device_stub__vectorSumPfS_S_
.size _Z9vectorSumPfS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d"
.size .L.str, 3
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "oops, %d, error: %d\n"
.size .L.str.1, 21
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "cpu time: %.3f ms\n"
.size .L.str.2, 19
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "gpu time: %.3f ms\n"
.size .L.str.3, 19
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "%d\t%f\t%f\n"
.size .L.str.4, 10
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9vectorSumPfS_S_"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__vectorSumPfS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9vectorSumPfS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <cuda.h>
// Notice that this file needs to have a .cu extension for the NVCC compiler
// to understand what it is supposed to do. NVCC can compile C and C++, by
// emulating a C++ compiler. However, any code that contains GPU kernels
// must reside in a CUDA unit with .cu extension.
//-----------------------------------------------------------------------------
// TheKernel: basic kernel containing a print statement.
//-----------------------------------------------------------------------------
__global__ void TheKernel()
{
printf("This is the kernel saying hello world, from the GPU.\n");
}
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main()
{
printf("This is the C layer saying hello world, from the host.\n");
// Launch the kernel
TheKernel<<<1, 1>>>();
// It appears essential to call for synchronization before finally
// exiting, lest you risk the program crashing your machine!
cudaDeviceSynchronize();
return 0;
} | code for sm_80
Function : _Z9TheKernelv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0020*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe200078e00ff */
/*0030*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0040*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe200078e00ff */
/*0050*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */
/* 0x00006c0000000a00 */
/*0060*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x000fe40000000000 */
/*0070*/ MOV R11, 0xe0 ; /* 0x000000e0000b7802 */
/* 0x000fe40000000f00 */
/*0080*/ MOV R20, 0x60 ; /* 0x0000006000147802 */
/* 0x000fe40000000f00 */
/*0090*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*00a0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x001fc40000000f00 */
/*00b0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*00c0*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*00d0*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x002fea0003c00000 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <cuda.h>
// Notice that this file needs to have a .cu extension for the NVCC compiler
// to understand what it is supposed to do. NVCC can compile C and C++, by
// emulating a C++ compiler. However, any code that contains GPU kernels
// must reside in a CUDA unit with .cu extension.
//-----------------------------------------------------------------------------
// TheKernel: basic kernel containing a print statement.
//-----------------------------------------------------------------------------
__global__ void TheKernel()
{
printf("This is the kernel saying hello world, from the GPU.\n");
}
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main()
{
printf("This is the C layer saying hello world, from the host.\n");
// Launch the kernel
TheKernel<<<1, 1>>>();
// It appears essential to call for synchronization before finally
// exiting, lest you risk the program crashing your machine!
cudaDeviceSynchronize();
return 0;
} | .file "tmpxft_0016d86d_00000000-6_program_ia.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z9TheKernelvv
.type _Z27__device_stub__Z9TheKernelvv, @function
_Z27__device_stub__Z9TheKernelvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z9TheKernelv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z9TheKernelvv, .-_Z27__device_stub__Z9TheKernelvv
.globl _Z9TheKernelv
.type _Z9TheKernelv, @function
_Z9TheKernelv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z9TheKernelvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9TheKernelv, .-_Z9TheKernelv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "This is the C layer saying hello world, from the host.\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L12:
call cudaDeviceSynchronize@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
call _Z27__device_stub__Z9TheKernelvv
jmp .L12
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z9TheKernelv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z9TheKernelv(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <cuda.h>
// Notice that this file needs to have a .cu extension for the NVCC compiler
// to understand what it is supposed to do. NVCC can compile C and C++, by
// emulating a C++ compiler. However, any code that contains GPU kernels
// must reside in a CUDA unit with .cu extension.
//-----------------------------------------------------------------------------
// TheKernel: basic kernel containing a print statement.
//-----------------------------------------------------------------------------
__global__ void TheKernel()
{
printf("This is the kernel saying hello world, from the GPU.\n");
}
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main()
{
printf("This is the C layer saying hello world, from the host.\n");
// Launch the kernel
TheKernel<<<1, 1>>>();
// It appears essential to call for synchronization before finally
// exiting, lest you risk the program crashing your machine!
cudaDeviceSynchronize();
return 0;
} | #include <stdio.h>
#include <hip/hip_runtime.h>
// Notice that this file needs to have a .cu extension for the NVCC compiler
// to understand what it is supposed to do. NVCC can compile C and C++, by
// emulating a C++ compiler. However, any code that contains GPU kernels
// must reside in a CUDA unit with .cu extension.
//-----------------------------------------------------------------------------
// TheKernel: basic kernel containing a print statement.
//-----------------------------------------------------------------------------
__global__ void TheKernel()
{
printf("This is the kernel saying hello world, from the GPU.\n");
}
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main()
{
printf("This is the C layer saying hello world, from the host.\n");
// Launch the kernel
TheKernel<<<1, 1>>>();
// It appears essential to call for synchronization before finally
// exiting, lest you risk the program crashing your machine!
hipDeviceSynchronize();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
// Notice that this file needs to have a .cu extension for the NVCC compiler
// to understand what it is supposed to do. NVCC can compile C and C++, by
// emulating a C++ compiler. However, any code that contains GPU kernels
// must reside in a CUDA unit with .cu extension.
//-----------------------------------------------------------------------------
// TheKernel: basic kernel containing a print statement.
//-----------------------------------------------------------------------------
__global__ void TheKernel()
{
printf("This is the kernel saying hello world, from the GPU.\n");
}
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main()
{
printf("This is the C layer saying hello world, from the host.\n");
// Launch the kernel
TheKernel<<<1, 1>>>();
// It appears essential to call for synchronization before finally
// exiting, lest you risk the program crashing your machine!
hipDeviceSynchronize();
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9TheKernelv
.globl _Z9TheKernelv
.p2align 8
.type _Z9TheKernelv,@function
_Z9TheKernelv:
s_load_b64 s[2:3], s[0:1], 0x50
v_mbcnt_lo_u32_b32 v20, -1, 0
v_mov_b32_e32 v6, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_dual_mov_b32 v7, 0 :: v_dual_mov_b32 v4, v20
v_readfirstlane_b32 s0, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_eq_u32_e64 s0, s0, v4
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_6
v_mov_b32_e32 v0, 0
s_mov_b32 s4, exec_lo
s_waitcnt lgkmcnt(0)
global_load_b64 v[8:9], v0, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[5:6], v0, s[2:3]
s_waitcnt vmcnt(1)
v_and_b32_e32 v1, v1, v8
v_and_b32_e32 v2, v2, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v3, v1, 24
v_mul_lo_u32 v2, v2, 24
v_mul_lo_u32 v1, v1, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, v3, v2
s_waitcnt vmcnt(0)
v_add_co_u32 v1, vcc_lo, v5, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, v6, v2, vcc_lo
global_load_b64 v[6:7], v[1:2], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[6:7], v0, v[6:9], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[6:7], v[8:9]
s_cbranch_execz .LBB0_5
s_mov_b32 s5, 0
.p2align 6
.LBB0_3:
s_sleep 1
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[10:11], v0, s[2:3]
v_dual_mov_b32 v9, v7 :: v_dual_mov_b32 v8, v6
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v1, v1, v8
v_and_b32_e32 v7, v2, v9
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[5:6], null, v1, 24, v[10:11]
v_mov_b32_e32 v1, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v7, 24, v[1:2]
v_mov_b32_e32 v6, v2
global_load_b64 v[6:7], v[5:6], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[6:7], v0, v[6:9], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[8:9]
s_or_b32 s5, vcc_lo, s5
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s5
s_cbranch_execnz .LBB0_3
s_or_b32 exec_lo, exec_lo, s5
.LBB0_5:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s4
.LBB0_6:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
v_mov_b32_e32 v5, 0
v_readfirstlane_b32 s4, v6
v_readfirstlane_b32 s5, v7
s_mov_b32 s8, exec_lo
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b64 v[8:9], v5, s[2:3] offset:40
global_load_b128 v[0:3], v5, s[2:3]
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s6, v8
v_readfirstlane_b32 s7, v9
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[6:7], s[4:5], s[6:7]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_8
v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, 0
s_mul_i32 s8, s7, 24
s_mul_hi_u32 s9, s6, 24
v_dual_mov_b32 v8, 2 :: v_dual_mov_b32 v9, 1
s_add_i32 s9, s9, s8
s_mul_i32 s8, s6, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v10, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v11, vcc_lo, s9, v1, vcc_lo
global_store_b128 v[10:11], v[6:9], off offset:8
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s1
s_lshl_b64 s[8:9], s[6:7], 12
v_lshlrev_b64 v[4:5], 6, v[4:5]
s_waitcnt vmcnt(0)
v_add_co_u32 v2, vcc_lo, v2, s8
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v3, vcc_lo
v_mov_b32_e32 v3, 0
s_mov_b32 s8, 0
s_delay_alu instid0(VALU_DEP_3)
v_add_co_u32 v6, vcc_lo, v2, v4
v_mov_b32_e32 v2, 33
s_mov_b32 s9, s8
s_mov_b32 s10, s8
s_mov_b32 s11, s8
v_add_co_ci_u32_e32 v7, vcc_lo, v7, v5, vcc_lo
v_mov_b32_e32 v4, v3
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v8, s8
v_dual_mov_b32 v9, s9 :: v_dual_mov_b32 v10, s10
v_mov_b32_e32 v11, s11
s_clause 0x3
global_store_b128 v[6:7], v[2:5], off
global_store_b128 v[6:7], v[8:11], off offset:16
global_store_b128 v[6:7], v[8:11], off offset:32
global_store_b128 v[6:7], v[8:11], off offset:48
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_16
v_dual_mov_b32 v10, 0 :: v_dual_mov_b32 v11, s4
v_mov_b32_e32 v12, s5
s_clause 0x1
global_load_b64 v[13:14], v10, s[2:3] offset:32 glc
global_load_b64 v[2:3], v10, s[2:3] offset:40
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
v_readfirstlane_b32 s9, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[8:9], s[8:9], s[4:5]
s_mul_i32 s9, s9, 24
s_mul_hi_u32 s10, s8, 24
s_mul_i32 s8, s8, 24
s_add_i32 s10, s10, s9
v_add_co_u32 v8, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v9, vcc_lo, s10, v1, vcc_lo
s_mov_b32 s8, exec_lo
global_store_b64 v[8:9], v[13:14], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v10, v[11:14], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[4:5], v[13:14]
s_cbranch_execz .LBB0_12
s_mov_b32 s9, 0
.LBB0_11:
v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
s_sleep 1
global_store_b64 v[8:9], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v10, v[2:5], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
s_or_b32 s9, vcc_lo, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_11
.LBB0_12:
s_or_b32 exec_lo, exec_lo, s8
v_mov_b32_e32 v2, 0
s_mov_b32 s9, exec_lo
s_mov_b32 s8, exec_lo
v_mbcnt_lo_u32_b32 v4, s9, 0
global_load_b64 v[2:3], v2, s[2:3] offset:16
v_cmpx_eq_u32_e32 0, v4
s_cbranch_execz .LBB0_14
s_bcnt1_i32_b32 s9, s9
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v4, s9
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[2:3], v[4:5], off offset:8
.LBB0_14:
s_or_b32 exec_lo, exec_lo, s8
s_waitcnt vmcnt(0)
global_load_b64 v[4:5], v[2:3], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
s_cbranch_vccnz .LBB0_16
global_load_b32 v2, v[2:3], off offset:24
v_mov_b32_e32 v3, 0
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
s_waitcnt_vscnt null, 0x0
global_store_b64 v[4:5], v[2:3], off
s_and_b32 m0, s8, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_16:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s7, 24
s_mul_hi_u32 s7, s6, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s7, s7, s1
s_mul_i32 s1, s6, 24
v_add_co_u32 v0, vcc_lo, v0, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_20
.p2align 6
.LBB0_17:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_19
s_sleep 1
s_cbranch_execnz .LBB0_20
s_branch .LBB0_22
.p2align 6
.LBB0_19:
s_branch .LBB0_22
.LBB0_20:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_17
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_17
.LBB0_22:
global_load_b64 v[22:23], v[6:7], off
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_26
v_mov_b32_e32 v6, 0
s_clause 0x2
global_load_b64 v[2:3], v6, s[2:3] offset:40
global_load_b64 v[7:8], v6, s[2:3] offset:24 glc
global_load_b64 v[4:5], v6, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v9, vcc_lo, v2, 1
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v9, s4
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
v_dual_cndmask_b32 v1, v1, v10 :: v_dual_cndmask_b32 v0, v0, v9
v_and_b32_e32 v3, v1, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v2, v0, v2
v_mul_lo_u32 v3, v3, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_hi_u32 v9, v2, 24
v_mul_lo_u32 v2, v2, 24
v_add_nc_u32_e32 v3, v9, v3
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, v4, v2
v_mov_b32_e32 v2, v7
v_add_co_ci_u32_e32 v5, vcc_lo, v5, v3, vcc_lo
v_mov_b32_e32 v3, v8
global_store_b64 v[4:5], v[7:8], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[7:8]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_26
s_mov_b32 s0, 0
.LBB0_25:
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[7:8], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_25
.LBB0_26:
s_or_b32 exec_lo, exec_lo, s1
s_getpc_b64 s[4:5]
s_add_u32 s4, s4, .str@rel32@lo+4
s_addc_u32 s5, s5, .str@rel32@hi+12
s_mov_b32 s0, -1
s_cmp_lg_u64 s[4:5], 0
s_cbranch_scc0 .LBB0_105
s_waitcnt vmcnt(0)
v_dual_mov_b32 v1, v23 :: v_dual_and_b32 v0, -3, v22
v_mov_b32_e32 v25, 0
s_mov_b64 s[6:7], 54
s_branch .LBB0_29
.LBB0_28:
s_or_b32 exec_lo, exec_lo, s1
s_sub_u32 s6, s6, s8
s_subb_u32 s7, s7, s9
s_add_u32 s4, s4, s8
s_addc_u32 s5, s5, s9
s_cmp_lg_u64 s[6:7], 0
s_cbranch_scc0 .LBB0_104
.LBB0_29:
v_cmp_lt_u64_e64 s0, s[6:7], 56
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 s0, s0, exec_lo
s_cselect_b32 s8, s6, 56
s_cselect_b32 s9, s7, 0
s_cmp_gt_u32 s8, 7
s_mov_b32 s0, -1
s_cbranch_scc1 .LBB0_34
v_mov_b32_e32 v2, 0
v_mov_b32_e32 v3, 0
s_cmp_eq_u32 s8, 0
s_cbranch_scc1 .LBB0_33
s_lshl_b64 s[0:1], s[8:9], 3
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], s[4:5]
.LBB0_32:
global_load_u8 v4, v25, s[12:13]
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v4
s_delay_alu instid0(VALU_DEP_1)
v_lshlrev_b64 v[4:5], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_add_u32 s12, s12, 1
s_addc_u32 s13, s13, 0
s_cmp_lg_u32 s0, s10
v_or_b32_e32 v2, v4, v2
v_or_b32_e32 v3, v5, v3
s_cbranch_scc1 .LBB0_32
.LBB0_33:
s_mov_b32 s0, 0
s_mov_b32 s15, 0
.LBB0_34:
s_and_not1_b32 vcc_lo, exec_lo, s0
s_mov_b64 s[0:1], s[4:5]
s_cbranch_vccnz .LBB0_36
global_load_b64 v[2:3], v25, s[4:5]
s_add_i32 s15, s8, -8
s_add_u32 s0, s4, 8
s_addc_u32 s1, s5, 0
.LBB0_36:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_41
v_mov_b32_e32 v4, 0
v_mov_b32_e32 v5, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_40
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_39:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v6, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v4, v6, v4
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v5, v7, v5
s_cbranch_scc1 .LBB0_39
.LBB0_40:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_42
s_branch .LBB0_43
.LBB0_41:
.LBB0_42:
global_load_b64 v[4:5], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_43:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_48
v_mov_b32_e32 v6, 0
v_mov_b32_e32 v7, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_47
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_46:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v8, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[8:9], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s14, s12
v_or_b32_e32 v6, v8, v6
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v7, v9, v7
s_cbranch_scc1 .LBB0_46
.LBB0_47:
s_mov_b32 s15, 0
s_cbranch_execz .LBB0_49
s_branch .LBB0_50
.LBB0_48:
.LBB0_49:
global_load_b64 v[6:7], v25, s[0:1]
s_add_i32 s15, s14, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_50:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_55
v_mov_b32_e32 v8, 0
v_mov_b32_e32 v9, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_54
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_53:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v10, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[10:11], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v8, v10, v8
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v9, v11, v9
s_cbranch_scc1 .LBB0_53
.LBB0_54:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_56
s_branch .LBB0_57
.LBB0_55:
.LBB0_56:
global_load_b64 v[8:9], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_57:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_62
v_mov_b32_e32 v10, 0
v_mov_b32_e32 v11, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_61
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_60:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v12, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[12:13], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s14, s12
v_or_b32_e32 v10, v12, v10
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v11, v13, v11
s_cbranch_scc1 .LBB0_60
.LBB0_61:
s_mov_b32 s15, 0
s_cbranch_execz .LBB0_63
s_branch .LBB0_64
.LBB0_62:
.LBB0_63:
global_load_b64 v[10:11], v25, s[0:1]
s_add_i32 s15, s14, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_64:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_69
v_mov_b32_e32 v12, 0
v_mov_b32_e32 v13, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_68
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_67:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v14, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[14:15], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v12, v14, v12
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v13, v15, v13
s_cbranch_scc1 .LBB0_67
.LBB0_68:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_70
s_branch .LBB0_71
.LBB0_69:
.LBB0_70:
global_load_b64 v[12:13], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_71:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_76
v_mov_b32_e32 v14, 0
v_mov_b32_e32 v15, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_75
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], s[0:1]
.LBB0_74:
global_load_u8 v16, v25, s[12:13]
s_add_i32 s14, s14, -1
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v16
s_delay_alu instid0(VALU_DEP_1)
v_lshlrev_b64 v[16:17], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_add_u32 s12, s12, 1
s_addc_u32 s13, s13, 0
s_cmp_lg_u32 s14, 0
v_or_b32_e32 v14, v16, v14
v_or_b32_e32 v15, v17, v15
s_cbranch_scc1 .LBB0_74
.LBB0_75:
s_cbranch_execz .LBB0_77
s_branch .LBB0_78
.LBB0_76:
.LBB0_77:
global_load_b64 v[14:15], v25, s[0:1]
.LBB0_78:
v_mov_b32_e32 v24, v20
v_mov_b32_e32 v26, 0
v_mov_b32_e32 v27, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s0, v24
v_cmp_eq_u32_e64 s0, s0, v24
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_84
global_load_b64 v[18:19], v25, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[16:17], v25, s[2:3] offset:40
global_load_b64 v[26:27], v25, s[2:3]
s_mov_b32 s10, exec_lo
s_waitcnt vmcnt(1)
v_and_b32_e32 v17, v17, v19
v_and_b32_e32 v16, v16, v18
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_lo_u32 v17, v17, 24
v_mul_hi_u32 v21, v16, 24
v_mul_lo_u32 v16, v16, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v17, v21, v17
s_waitcnt vmcnt(0)
v_add_co_u32 v16, vcc_lo, v26, v16
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v17, vcc_lo, v27, v17, vcc_lo
global_load_b64 v[16:17], v[16:17], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[26:27], v25, v[16:19], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[26:27], v[18:19]
s_cbranch_execz .LBB0_83
s_mov_b32 s11, 0
.p2align 6
.LBB0_81:
s_sleep 1
s_clause 0x1
global_load_b64 v[16:17], v25, s[2:3] offset:40
global_load_b64 v[28:29], v25, s[2:3]
v_dual_mov_b32 v18, v26 :: v_dual_mov_b32 v19, v27
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_and_b32_e32 v16, v16, v18
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[26:27], null, v16, 24, v[28:29]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_mov_b32 v16, v27 :: v_dual_and_b32 v17, v17, v19
v_mad_u64_u32 v[27:28], null, v17, 24, v[16:17]
global_load_b64 v[16:17], v[26:27], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[26:27], v25, v[16:19], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[26:27], v[18:19]
s_or_b32 s11, vcc_lo, s11
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s11
s_cbranch_execnz .LBB0_81
s_or_b32 exec_lo, exec_lo, s11
.LBB0_83:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s10
.LBB0_84:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
s_clause 0x1
global_load_b64 v[28:29], v25, s[2:3] offset:40
global_load_b128 v[16:19], v25, s[2:3]
v_readfirstlane_b32 s10, v26
v_readfirstlane_b32 s11, v27
s_mov_b32 s14, exec_lo
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s12, v28
v_readfirstlane_b32 s13, v29
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[12:13], s[10:11], s[12:13]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_86
v_dual_mov_b32 v26, s14 :: v_dual_mov_b32 v27, 0
s_mul_i32 s14, s13, 24
s_mul_hi_u32 s15, s12, 24
v_dual_mov_b32 v28, 2 :: v_dual_mov_b32 v29, 1
s_add_i32 s15, s15, s14
s_mul_i32 s14, s12, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v30, vcc_lo, v16, s14
v_add_co_ci_u32_e32 v31, vcc_lo, s15, v17, vcc_lo
global_store_b128 v[30:31], v[26:29], off offset:8
.LBB0_86:
s_or_b32 exec_lo, exec_lo, s1
v_cmp_gt_u64_e64 vcc_lo, s[6:7], 56
v_or_b32_e32 v21, 2, v0
s_lshl_b64 s[14:15], s[12:13], 12
v_lshlrev_b64 v[26:27], 6, v[24:25]
s_lshl_b32 s1, s8, 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_i32 s1, s1, 28
v_cndmask_b32_e32 v0, v21, v0, vcc_lo
s_waitcnt vmcnt(0)
v_add_co_u32 v18, vcc_lo, v18, s14
v_add_co_ci_u32_e32 v19, vcc_lo, s15, v19, vcc_lo
s_and_b32 s1, s1, 0x1e0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v18, vcc_lo, v18, v26
v_and_or_b32 v0, v0, 0xffffff1f, s1
v_add_co_ci_u32_e32 v19, vcc_lo, v19, v27, vcc_lo
s_clause 0x3
global_store_b128 v[18:19], v[0:3], off
global_store_b128 v[18:19], v[4:7], off offset:16
global_store_b128 v[18:19], v[8:11], off offset:32
global_store_b128 v[18:19], v[12:15], off offset:48
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_94
s_clause 0x1
global_load_b64 v[8:9], v25, s[2:3] offset:32 glc
global_load_b64 v[0:1], v25, s[2:3] offset:40
v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s14, v0
v_readfirstlane_b32 s15, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[14:15], s[14:15], s[10:11]
s_mul_i32 s15, s15, 24
s_mul_hi_u32 s16, s14, 24
s_mul_i32 s14, s14, 24
s_add_i32 s16, s16, s15
v_add_co_u32 v4, vcc_lo, v16, s14
v_add_co_ci_u32_e32 v5, vcc_lo, s16, v17, vcc_lo
s_mov_b32 s14, exec_lo
global_store_b64 v[4:5], v[8:9], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v25, v[6:9], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[2:3], v[8:9]
s_cbranch_execz .LBB0_90
s_mov_b32 s15, 0
.LBB0_89:
v_dual_mov_b32 v0, s10 :: v_dual_mov_b32 v1, s11
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[0:1], v25, v[0:3], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
s_or_b32 s15, vcc_lo, s15
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s15
s_cbranch_execnz .LBB0_89
.LBB0_90:
s_or_b32 exec_lo, exec_lo, s14
global_load_b64 v[0:1], v25, s[2:3] offset:16
s_mov_b32 s15, exec_lo
s_mov_b32 s14, exec_lo
v_mbcnt_lo_u32_b32 v2, s15, 0
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v2
s_cbranch_execz .LBB0_92
s_bcnt1_i32_b32 s15, s15
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v2, s15
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[0:1], v[2:3], off offset:8
.LBB0_92:
s_or_b32 exec_lo, exec_lo, s14
s_waitcnt vmcnt(0)
global_load_b64 v[2:3], v[0:1], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
s_cbranch_vccnz .LBB0_94
global_load_b32 v24, v[0:1], off offset:24
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s14, v24
s_waitcnt_vscnt null, 0x0
global_store_b64 v[2:3], v[24:25], off
s_and_b32 m0, s14, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_94:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s13, 24
s_mul_hi_u32 s13, s12, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s13, s13, s1
s_mul_i32 s1, s12, 24
v_add_co_u32 v0, vcc_lo, v16, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s13, v17, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_98
.p2align 6
.LBB0_95:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_97
s_sleep 1
s_cbranch_execnz .LBB0_98
s_branch .LBB0_100
.p2align 6
.LBB0_97:
s_branch .LBB0_100
.LBB0_98:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_95
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_95
.LBB0_100:
global_load_b64 v[0:1], v[18:19], off
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_28
s_clause 0x2
global_load_b64 v[4:5], v25, s[2:3] offset:40
global_load_b64 v[8:9], v25, s[2:3] offset:24 glc
global_load_b64 v[6:7], v25, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v10, vcc_lo, v4, 1
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v10, s10
v_add_co_ci_u32_e32 v3, vcc_lo, s11, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
v_dual_cndmask_b32 v3, v3, v11 :: v_dual_cndmask_b32 v2, v2, v10
v_and_b32_e32 v5, v3, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_and_b32_e32 v4, v2, v4
v_mul_hi_u32 v10, v4, 24
v_mul_lo_u32 v4, v4, 24
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_u32 v6, vcc_lo, v6, v4
v_mov_b32_e32 v4, v8
v_mul_lo_u32 v5, v5, 24
v_add_nc_u32_e32 v5, v10, v5
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e32 v7, vcc_lo, v7, v5, vcc_lo
v_mov_b32_e32 v5, v9
global_store_b64 v[6:7], v[8:9], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v25, v[2:5], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[4:5], v[8:9]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_28
s_mov_b32 s0, 0
.LBB0_103:
s_sleep 1
global_store_b64 v[6:7], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[8:9], v25, v[2:5], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[8:9], v[4:5]
v_dual_mov_b32 v4, v8 :: v_dual_mov_b32 v5, v9
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_103
s_branch .LBB0_28
.LBB0_104:
s_mov_b32 s0, 0
.LBB0_105:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 vcc_lo, exec_lo, s0
s_cbranch_vccz .LBB0_132
v_readfirstlane_b32 s0, v20
v_mov_b32_e32 v4, 0
v_mov_b32_e32 v5, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_eq_u32_e64 s0, s0, v20
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_112
s_waitcnt vmcnt(0)
v_mov_b32_e32 v0, 0
s_mov_b32 s4, exec_lo
global_load_b64 v[6:7], v0, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[3:4], v0, s[2:3]
s_waitcnt vmcnt(1)
v_and_b32_e32 v1, v1, v6
v_and_b32_e32 v2, v2, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v5, v1, 24
v_mul_lo_u32 v2, v2, 24
v_mul_lo_u32 v1, v1, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, v5, v2
s_waitcnt vmcnt(0)
v_add_co_u32 v1, vcc_lo, v3, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, v4, v2, vcc_lo
global_load_b64 v[4:5], v[1:2], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[4:5], v0, v[4:7], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[4:5], v[6:7]
s_cbranch_execz .LBB0_111
s_mov_b32 s5, 0
.p2align 6
.LBB0_109:
s_sleep 1
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[8:9], v0, s[2:3]
v_dual_mov_b32 v7, v5 :: v_dual_mov_b32 v6, v4
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_and_b32_e32 v1, v1, v6
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[3:4], null, v1, 24, v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_mov_b32 v1, v4 :: v_dual_and_b32 v2, v2, v7
v_mad_u64_u32 v[4:5], null, v2, 24, v[1:2]
global_load_b64 v[4:5], v[3:4], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[4:5], v0, v[4:7], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
s_or_b32 s5, vcc_lo, s5
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s5
s_cbranch_execnz .LBB0_109
s_or_b32 exec_lo, exec_lo, s5
.LBB0_111:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s4
.LBB0_112:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
v_mov_b32_e32 v21, 0
v_readfirstlane_b32 s4, v4
v_readfirstlane_b32 s5, v5
s_mov_b32 s8, exec_lo
s_clause 0x1
global_load_b64 v[6:7], v21, s[2:3] offset:40
global_load_b128 v[0:3], v21, s[2:3]
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s6, v6
v_readfirstlane_b32 s7, v7
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[6:7], s[4:5], s[6:7]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_114
v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, 0
s_mul_i32 s8, s7, 24
s_mul_hi_u32 s9, s6, 24
v_dual_mov_b32 v6, 2 :: v_dual_mov_b32 v7, 1
s_add_i32 s9, s9, s8
s_mul_i32 s8, s6, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v8, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v9, vcc_lo, s9, v1, vcc_lo
global_store_b128 v[8:9], v[4:7], off offset:8
.LBB0_114:
s_or_b32 exec_lo, exec_lo, s1
s_lshl_b64 s[8:9], s[6:7], 12
v_and_or_b32 v22, v22, 0xffffff1d, 34
s_waitcnt vmcnt(0)
v_add_co_u32 v4, vcc_lo, v2, s8
v_add_co_ci_u32_e32 v5, vcc_lo, s9, v3, vcc_lo
v_lshlrev_b64 v[2:3], 6, v[20:21]
s_mov_b32 s8, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_mov_b32 s9, s8
s_mov_b32 s10, s8
s_mov_b32 s11, s8
v_add_co_u32 v8, vcc_lo, v4, v2
v_mov_b32_e32 v6, 0
v_add_co_ci_u32_e32 v9, vcc_lo, v5, v3, vcc_lo
v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v5, s11
v_dual_mov_b32 v3, s9 :: v_dual_mov_b32 v4, s10
s_delay_alu instid0(VALU_DEP_4)
v_mov_b32_e32 v7, v6
s_clause 0x4
global_store_b64 v[8:9], v[22:23], off
global_store_b128 v[8:9], v[2:5], off offset:8
global_store_b128 v[8:9], v[2:5], off offset:24
global_store_b128 v[8:9], v[2:5], off offset:40
global_store_b64 v[8:9], v[6:7], off offset:56
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_122
v_dual_mov_b32 v8, 0 :: v_dual_mov_b32 v9, s4
v_mov_b32_e32 v10, s5
s_clause 0x1
global_load_b64 v[11:12], v8, s[2:3] offset:32 glc
global_load_b64 v[2:3], v8, s[2:3] offset:40
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
v_readfirstlane_b32 s9, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[8:9], s[8:9], s[4:5]
s_mul_i32 s9, s9, 24
s_mul_hi_u32 s10, s8, 24
s_mul_i32 s8, s8, 24
s_add_i32 s10, s10, s9
v_add_co_u32 v6, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v7, vcc_lo, s10, v1, vcc_lo
s_mov_b32 s8, exec_lo
global_store_b64 v[6:7], v[11:12], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v8, v[9:12], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[4:5], v[11:12]
s_cbranch_execz .LBB0_118
s_mov_b32 s9, 0
.LBB0_117:
v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
s_sleep 1
global_store_b64 v[6:7], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v8, v[2:5], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
s_or_b32 s9, vcc_lo, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_117
.LBB0_118:
s_or_b32 exec_lo, exec_lo, s8
v_mov_b32_e32 v2, 0
s_mov_b32 s9, exec_lo
s_mov_b32 s8, exec_lo
v_mbcnt_lo_u32_b32 v4, s9, 0
global_load_b64 v[2:3], v2, s[2:3] offset:16
v_cmpx_eq_u32_e32 0, v4
s_cbranch_execz .LBB0_120
s_bcnt1_i32_b32 s9, s9
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v4, s9
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[2:3], v[4:5], off offset:8
.LBB0_120:
s_or_b32 exec_lo, exec_lo, s8
s_waitcnt vmcnt(0)
global_load_b64 v[4:5], v[2:3], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
s_cbranch_vccnz .LBB0_122
global_load_b32 v2, v[2:3], off offset:24
v_mov_b32_e32 v3, 0
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
s_waitcnt_vscnt null, 0x0
global_store_b64 v[4:5], v[2:3], off
s_and_b32 m0, s8, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_122:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s7, 24
s_mul_hi_u32 s7, s6, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s7, s7, s1
s_mul_i32 s1, s6, 24
v_add_co_u32 v0, vcc_lo, v0, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_126
.p2align 6
.LBB0_123:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_125
s_sleep 1
s_cbranch_execnz .LBB0_126
s_branch .LBB0_128
.p2align 6
.LBB0_125:
s_branch .LBB0_128
.LBB0_126:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_123
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_123
.LBB0_128:
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_132
v_mov_b32_e32 v6, 0
s_clause 0x2
global_load_b64 v[2:3], v6, s[2:3] offset:40
global_load_b64 v[7:8], v6, s[2:3] offset:24 glc
global_load_b64 v[4:5], v6, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v9, vcc_lo, v2, 1
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v9, s4
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
v_dual_cndmask_b32 v1, v1, v10 :: v_dual_cndmask_b32 v0, v0, v9
v_and_b32_e32 v3, v1, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v2, v0, v2
v_mul_lo_u32 v3, v3, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_hi_u32 v9, v2, 24
v_mul_lo_u32 v2, v2, 24
v_add_nc_u32_e32 v3, v9, v3
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, v4, v2
v_mov_b32_e32 v2, v7
v_add_co_ci_u32_e32 v5, vcc_lo, v5, v3, vcc_lo
v_mov_b32_e32 v3, v8
global_store_b64 v[4:5], v[7:8], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[7:8]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_132
s_mov_b32 s0, 0
.LBB0_131:
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[7:8], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_131
.LBB0_132:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9TheKernelv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 256
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 32
.amdhsa_next_free_sgpr 18
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9TheKernelv, .Lfunc_end0-_Z9TheKernelv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type .str,@object
.section .rodata.str1.1,"aMS",@progbits,1
.str:
.asciz "This is the kernel saying hello world, from the GPU.\n"
.size .str, 54
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: hidden_block_count_x
- .offset: 4
.size: 4
.value_kind: hidden_block_count_y
- .offset: 8
.size: 4
.value_kind: hidden_block_count_z
- .offset: 12
.size: 2
.value_kind: hidden_group_size_x
- .offset: 14
.size: 2
.value_kind: hidden_group_size_y
- .offset: 16
.size: 2
.value_kind: hidden_group_size_z
- .offset: 18
.size: 2
.value_kind: hidden_remainder_x
- .offset: 20
.size: 2
.value_kind: hidden_remainder_y
- .offset: 22
.size: 2
.value_kind: hidden_remainder_z
- .offset: 40
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 64
.size: 2
.value_kind: hidden_grid_dims
- .offset: 80
.size: 8
.value_kind: hidden_hostcall_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 256
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9TheKernelv
.private_segment_fixed_size: 0
.sgpr_count: 20
.sgpr_spill_count: 0
.symbol: _Z9TheKernelv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 32
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
// Notice that this file needs to have a .cu extension for the NVCC compiler
// to understand what it is supposed to do. NVCC can compile C and C++, by
// emulating a C++ compiler. However, any code that contains GPU kernels
// must reside in a CUDA unit with .cu extension.
//-----------------------------------------------------------------------------
// TheKernel: basic kernel containing a print statement.
//-----------------------------------------------------------------------------
__global__ void TheKernel()
{
printf("This is the kernel saying hello world, from the GPU.\n");
}
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
int main()
{
printf("This is the C layer saying hello world, from the host.\n");
// Launch the kernel
TheKernel<<<1, 1>>>();
// It appears essential to call for synchronization before finally
// exiting, lest you risk the program crashing your machine!
hipDeviceSynchronize();
return 0;
} | .text
.file "program_ia.hip"
.globl _Z24__device_stub__TheKernelv # -- Begin function _Z24__device_stub__TheKernelv
.p2align 4, 0x90
.type _Z24__device_stub__TheKernelv,@function
_Z24__device_stub__TheKernelv: # @_Z24__device_stub__TheKernelv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z9TheKernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z24__device_stub__TheKernelv, .Lfunc_end0-_Z24__device_stub__TheKernelv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movl $.Lstr, %edi
callq puts@PLT
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z9TheKernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
xorl %eax, %eax
addq $56, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9TheKernelv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9TheKernelv,@object # @_Z9TheKernelv
.section .rodata,"a",@progbits
.globl _Z9TheKernelv
.p2align 3, 0x0
_Z9TheKernelv:
.quad _Z24__device_stub__TheKernelv
.size _Z9TheKernelv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9TheKernelv"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "This is the C layer saying hello world, from the host."
.size .Lstr, 55
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__TheKernelv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9TheKernelv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9TheKernelv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0020*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe200078e00ff */
/*0030*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0040*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe200078e00ff */
/*0050*/ LDC.64 R2, c[0x4][R0] ; /* 0x0100000000027b82 */
/* 0x00006c0000000a00 */
/*0060*/ LEPC R8 ; /* 0x000000000008734e */
/* 0x000fe40000000000 */
/*0070*/ MOV R11, 0xe0 ; /* 0x000000e0000b7802 */
/* 0x000fe40000000f00 */
/*0080*/ MOV R20, 0x60 ; /* 0x0000006000147802 */
/* 0x000fe40000000f00 */
/*0090*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*00a0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x001fc40000000f00 */
/*00b0*/ IADD3 R20, P0, P1, -R20, R11, R8 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e108 */
/*00c0*/ IADD3.X R21, ~R0, R21, R9, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2509 */
/*00d0*/ CALL.ABS.NOINC R2 ; /* 0x0000000002007343 */
/* 0x002fea0003c00000 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9TheKernelv
.globl _Z9TheKernelv
.p2align 8
.type _Z9TheKernelv,@function
_Z9TheKernelv:
s_load_b64 s[2:3], s[0:1], 0x50
v_mbcnt_lo_u32_b32 v20, -1, 0
v_mov_b32_e32 v6, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_dual_mov_b32 v7, 0 :: v_dual_mov_b32 v4, v20
v_readfirstlane_b32 s0, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_eq_u32_e64 s0, s0, v4
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_6
v_mov_b32_e32 v0, 0
s_mov_b32 s4, exec_lo
s_waitcnt lgkmcnt(0)
global_load_b64 v[8:9], v0, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[5:6], v0, s[2:3]
s_waitcnt vmcnt(1)
v_and_b32_e32 v1, v1, v8
v_and_b32_e32 v2, v2, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v3, v1, 24
v_mul_lo_u32 v2, v2, 24
v_mul_lo_u32 v1, v1, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, v3, v2
s_waitcnt vmcnt(0)
v_add_co_u32 v1, vcc_lo, v5, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, v6, v2, vcc_lo
global_load_b64 v[6:7], v[1:2], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[6:7], v0, v[6:9], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[6:7], v[8:9]
s_cbranch_execz .LBB0_5
s_mov_b32 s5, 0
.p2align 6
.LBB0_3:
s_sleep 1
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[10:11], v0, s[2:3]
v_dual_mov_b32 v9, v7 :: v_dual_mov_b32 v8, v6
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v1, v1, v8
v_and_b32_e32 v7, v2, v9
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[5:6], null, v1, 24, v[10:11]
v_mov_b32_e32 v1, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v7, 24, v[1:2]
v_mov_b32_e32 v6, v2
global_load_b64 v[6:7], v[5:6], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[6:7], v0, v[6:9], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[8:9]
s_or_b32 s5, vcc_lo, s5
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s5
s_cbranch_execnz .LBB0_3
s_or_b32 exec_lo, exec_lo, s5
.LBB0_5:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s4
.LBB0_6:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
v_mov_b32_e32 v5, 0
v_readfirstlane_b32 s4, v6
v_readfirstlane_b32 s5, v7
s_mov_b32 s8, exec_lo
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b64 v[8:9], v5, s[2:3] offset:40
global_load_b128 v[0:3], v5, s[2:3]
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s6, v8
v_readfirstlane_b32 s7, v9
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[6:7], s[4:5], s[6:7]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_8
v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, 0
s_mul_i32 s8, s7, 24
s_mul_hi_u32 s9, s6, 24
v_dual_mov_b32 v8, 2 :: v_dual_mov_b32 v9, 1
s_add_i32 s9, s9, s8
s_mul_i32 s8, s6, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v10, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v11, vcc_lo, s9, v1, vcc_lo
global_store_b128 v[10:11], v[6:9], off offset:8
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s1
s_lshl_b64 s[8:9], s[6:7], 12
v_lshlrev_b64 v[4:5], 6, v[4:5]
s_waitcnt vmcnt(0)
v_add_co_u32 v2, vcc_lo, v2, s8
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v3, vcc_lo
v_mov_b32_e32 v3, 0
s_mov_b32 s8, 0
s_delay_alu instid0(VALU_DEP_3)
v_add_co_u32 v6, vcc_lo, v2, v4
v_mov_b32_e32 v2, 33
s_mov_b32 s9, s8
s_mov_b32 s10, s8
s_mov_b32 s11, s8
v_add_co_ci_u32_e32 v7, vcc_lo, v7, v5, vcc_lo
v_mov_b32_e32 v4, v3
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v8, s8
v_dual_mov_b32 v9, s9 :: v_dual_mov_b32 v10, s10
v_mov_b32_e32 v11, s11
s_clause 0x3
global_store_b128 v[6:7], v[2:5], off
global_store_b128 v[6:7], v[8:11], off offset:16
global_store_b128 v[6:7], v[8:11], off offset:32
global_store_b128 v[6:7], v[8:11], off offset:48
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_16
v_dual_mov_b32 v10, 0 :: v_dual_mov_b32 v11, s4
v_mov_b32_e32 v12, s5
s_clause 0x1
global_load_b64 v[13:14], v10, s[2:3] offset:32 glc
global_load_b64 v[2:3], v10, s[2:3] offset:40
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
v_readfirstlane_b32 s9, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[8:9], s[8:9], s[4:5]
s_mul_i32 s9, s9, 24
s_mul_hi_u32 s10, s8, 24
s_mul_i32 s8, s8, 24
s_add_i32 s10, s10, s9
v_add_co_u32 v8, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v9, vcc_lo, s10, v1, vcc_lo
s_mov_b32 s8, exec_lo
global_store_b64 v[8:9], v[13:14], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v10, v[11:14], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[4:5], v[13:14]
s_cbranch_execz .LBB0_12
s_mov_b32 s9, 0
.LBB0_11:
v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
s_sleep 1
global_store_b64 v[8:9], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v10, v[2:5], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
s_or_b32 s9, vcc_lo, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_11
.LBB0_12:
s_or_b32 exec_lo, exec_lo, s8
v_mov_b32_e32 v2, 0
s_mov_b32 s9, exec_lo
s_mov_b32 s8, exec_lo
v_mbcnt_lo_u32_b32 v4, s9, 0
global_load_b64 v[2:3], v2, s[2:3] offset:16
v_cmpx_eq_u32_e32 0, v4
s_cbranch_execz .LBB0_14
s_bcnt1_i32_b32 s9, s9
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v4, s9
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[2:3], v[4:5], off offset:8
.LBB0_14:
s_or_b32 exec_lo, exec_lo, s8
s_waitcnt vmcnt(0)
global_load_b64 v[4:5], v[2:3], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
s_cbranch_vccnz .LBB0_16
global_load_b32 v2, v[2:3], off offset:24
v_mov_b32_e32 v3, 0
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
s_waitcnt_vscnt null, 0x0
global_store_b64 v[4:5], v[2:3], off
s_and_b32 m0, s8, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_16:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s7, 24
s_mul_hi_u32 s7, s6, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s7, s7, s1
s_mul_i32 s1, s6, 24
v_add_co_u32 v0, vcc_lo, v0, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_20
.p2align 6
.LBB0_17:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_19
s_sleep 1
s_cbranch_execnz .LBB0_20
s_branch .LBB0_22
.p2align 6
.LBB0_19:
s_branch .LBB0_22
.LBB0_20:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_17
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_17
.LBB0_22:
global_load_b64 v[22:23], v[6:7], off
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_26
v_mov_b32_e32 v6, 0
s_clause 0x2
global_load_b64 v[2:3], v6, s[2:3] offset:40
global_load_b64 v[7:8], v6, s[2:3] offset:24 glc
global_load_b64 v[4:5], v6, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v9, vcc_lo, v2, 1
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v9, s4
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
v_dual_cndmask_b32 v1, v1, v10 :: v_dual_cndmask_b32 v0, v0, v9
v_and_b32_e32 v3, v1, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v2, v0, v2
v_mul_lo_u32 v3, v3, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_hi_u32 v9, v2, 24
v_mul_lo_u32 v2, v2, 24
v_add_nc_u32_e32 v3, v9, v3
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, v4, v2
v_mov_b32_e32 v2, v7
v_add_co_ci_u32_e32 v5, vcc_lo, v5, v3, vcc_lo
v_mov_b32_e32 v3, v8
global_store_b64 v[4:5], v[7:8], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[7:8]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_26
s_mov_b32 s0, 0
.LBB0_25:
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[7:8], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_25
.LBB0_26:
s_or_b32 exec_lo, exec_lo, s1
s_getpc_b64 s[4:5]
s_add_u32 s4, s4, .str@rel32@lo+4
s_addc_u32 s5, s5, .str@rel32@hi+12
s_mov_b32 s0, -1
s_cmp_lg_u64 s[4:5], 0
s_cbranch_scc0 .LBB0_105
s_waitcnt vmcnt(0)
v_dual_mov_b32 v1, v23 :: v_dual_and_b32 v0, -3, v22
v_mov_b32_e32 v25, 0
s_mov_b64 s[6:7], 54
s_branch .LBB0_29
.LBB0_28:
s_or_b32 exec_lo, exec_lo, s1
s_sub_u32 s6, s6, s8
s_subb_u32 s7, s7, s9
s_add_u32 s4, s4, s8
s_addc_u32 s5, s5, s9
s_cmp_lg_u64 s[6:7], 0
s_cbranch_scc0 .LBB0_104
.LBB0_29:
v_cmp_lt_u64_e64 s0, s[6:7], 56
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 s0, s0, exec_lo
s_cselect_b32 s8, s6, 56
s_cselect_b32 s9, s7, 0
s_cmp_gt_u32 s8, 7
s_mov_b32 s0, -1
s_cbranch_scc1 .LBB0_34
v_mov_b32_e32 v2, 0
v_mov_b32_e32 v3, 0
s_cmp_eq_u32 s8, 0
s_cbranch_scc1 .LBB0_33
s_lshl_b64 s[0:1], s[8:9], 3
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], s[4:5]
.LBB0_32:
global_load_u8 v4, v25, s[12:13]
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v4
s_delay_alu instid0(VALU_DEP_1)
v_lshlrev_b64 v[4:5], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_add_u32 s12, s12, 1
s_addc_u32 s13, s13, 0
s_cmp_lg_u32 s0, s10
v_or_b32_e32 v2, v4, v2
v_or_b32_e32 v3, v5, v3
s_cbranch_scc1 .LBB0_32
.LBB0_33:
s_mov_b32 s0, 0
s_mov_b32 s15, 0
.LBB0_34:
s_and_not1_b32 vcc_lo, exec_lo, s0
s_mov_b64 s[0:1], s[4:5]
s_cbranch_vccnz .LBB0_36
global_load_b64 v[2:3], v25, s[4:5]
s_add_i32 s15, s8, -8
s_add_u32 s0, s4, 8
s_addc_u32 s1, s5, 0
.LBB0_36:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_41
v_mov_b32_e32 v4, 0
v_mov_b32_e32 v5, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_40
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_39:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v6, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v4, v6, v4
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v5, v7, v5
s_cbranch_scc1 .LBB0_39
.LBB0_40:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_42
s_branch .LBB0_43
.LBB0_41:
.LBB0_42:
global_load_b64 v[4:5], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_43:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_48
v_mov_b32_e32 v6, 0
v_mov_b32_e32 v7, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_47
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_46:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v8, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[8:9], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s14, s12
v_or_b32_e32 v6, v8, v6
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v7, v9, v7
s_cbranch_scc1 .LBB0_46
.LBB0_47:
s_mov_b32 s15, 0
s_cbranch_execz .LBB0_49
s_branch .LBB0_50
.LBB0_48:
.LBB0_49:
global_load_b64 v[6:7], v25, s[0:1]
s_add_i32 s15, s14, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_50:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_55
v_mov_b32_e32 v8, 0
v_mov_b32_e32 v9, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_54
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_53:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v10, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[10:11], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v8, v10, v8
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v9, v11, v9
s_cbranch_scc1 .LBB0_53
.LBB0_54:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_56
s_branch .LBB0_57
.LBB0_55:
.LBB0_56:
global_load_b64 v[8:9], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_57:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_62
v_mov_b32_e32 v10, 0
v_mov_b32_e32 v11, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_61
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_60:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v12, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[12:13], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s14, s12
v_or_b32_e32 v10, v12, v10
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v11, v13, v11
s_cbranch_scc1 .LBB0_60
.LBB0_61:
s_mov_b32 s15, 0
s_cbranch_execz .LBB0_63
s_branch .LBB0_64
.LBB0_62:
.LBB0_63:
global_load_b64 v[10:11], v25, s[0:1]
s_add_i32 s15, s14, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_64:
s_cmp_gt_u32 s15, 7
s_cbranch_scc1 .LBB0_69
v_mov_b32_e32 v12, 0
v_mov_b32_e32 v13, 0
s_cmp_eq_u32 s15, 0
s_cbranch_scc1 .LBB0_68
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], 0
.LBB0_67:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s16, s0, s12
s_addc_u32 s17, s1, s13
s_add_u32 s12, s12, 1
global_load_u8 v14, v25, s[16:17]
s_addc_u32 s13, s13, 0
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[14:15], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_cmp_lg_u32 s15, s12
v_or_b32_e32 v12, v14, v12
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v13, v15, v13
s_cbranch_scc1 .LBB0_67
.LBB0_68:
s_mov_b32 s14, 0
s_cbranch_execz .LBB0_70
s_branch .LBB0_71
.LBB0_69:
.LBB0_70:
global_load_b64 v[12:13], v25, s[0:1]
s_add_i32 s14, s15, -8
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
.LBB0_71:
s_cmp_gt_u32 s14, 7
s_cbranch_scc1 .LBB0_76
v_mov_b32_e32 v14, 0
v_mov_b32_e32 v15, 0
s_cmp_eq_u32 s14, 0
s_cbranch_scc1 .LBB0_75
s_mov_b64 s[10:11], 0
s_mov_b64 s[12:13], s[0:1]
.LBB0_74:
global_load_u8 v16, v25, s[12:13]
s_add_i32 s14, s14, -1
s_waitcnt vmcnt(0)
v_and_b32_e32 v24, 0xffff, v16
s_delay_alu instid0(VALU_DEP_1)
v_lshlrev_b64 v[16:17], s10, v[24:25]
s_add_u32 s10, s10, 8
s_addc_u32 s11, s11, 0
s_add_u32 s12, s12, 1
s_addc_u32 s13, s13, 0
s_cmp_lg_u32 s14, 0
v_or_b32_e32 v14, v16, v14
v_or_b32_e32 v15, v17, v15
s_cbranch_scc1 .LBB0_74
.LBB0_75:
s_cbranch_execz .LBB0_77
s_branch .LBB0_78
.LBB0_76:
.LBB0_77:
global_load_b64 v[14:15], v25, s[0:1]
.LBB0_78:
v_mov_b32_e32 v24, v20
v_mov_b32_e32 v26, 0
v_mov_b32_e32 v27, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s0, v24
v_cmp_eq_u32_e64 s0, s0, v24
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_84
global_load_b64 v[18:19], v25, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[16:17], v25, s[2:3] offset:40
global_load_b64 v[26:27], v25, s[2:3]
s_mov_b32 s10, exec_lo
s_waitcnt vmcnt(1)
v_and_b32_e32 v17, v17, v19
v_and_b32_e32 v16, v16, v18
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_lo_u32 v17, v17, 24
v_mul_hi_u32 v21, v16, 24
v_mul_lo_u32 v16, v16, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v17, v21, v17
s_waitcnt vmcnt(0)
v_add_co_u32 v16, vcc_lo, v26, v16
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v17, vcc_lo, v27, v17, vcc_lo
global_load_b64 v[16:17], v[16:17], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[26:27], v25, v[16:19], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[26:27], v[18:19]
s_cbranch_execz .LBB0_83
s_mov_b32 s11, 0
.p2align 6
.LBB0_81:
s_sleep 1
s_clause 0x1
global_load_b64 v[16:17], v25, s[2:3] offset:40
global_load_b64 v[28:29], v25, s[2:3]
v_dual_mov_b32 v18, v26 :: v_dual_mov_b32 v19, v27
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_and_b32_e32 v16, v16, v18
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[26:27], null, v16, 24, v[28:29]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_mov_b32 v16, v27 :: v_dual_and_b32 v17, v17, v19
v_mad_u64_u32 v[27:28], null, v17, 24, v[16:17]
global_load_b64 v[16:17], v[26:27], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[26:27], v25, v[16:19], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[26:27], v[18:19]
s_or_b32 s11, vcc_lo, s11
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s11
s_cbranch_execnz .LBB0_81
s_or_b32 exec_lo, exec_lo, s11
.LBB0_83:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s10
.LBB0_84:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
s_clause 0x1
global_load_b64 v[28:29], v25, s[2:3] offset:40
global_load_b128 v[16:19], v25, s[2:3]
v_readfirstlane_b32 s10, v26
v_readfirstlane_b32 s11, v27
s_mov_b32 s14, exec_lo
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s12, v28
v_readfirstlane_b32 s13, v29
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[12:13], s[10:11], s[12:13]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_86
v_dual_mov_b32 v26, s14 :: v_dual_mov_b32 v27, 0
s_mul_i32 s14, s13, 24
s_mul_hi_u32 s15, s12, 24
v_dual_mov_b32 v28, 2 :: v_dual_mov_b32 v29, 1
s_add_i32 s15, s15, s14
s_mul_i32 s14, s12, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v30, vcc_lo, v16, s14
v_add_co_ci_u32_e32 v31, vcc_lo, s15, v17, vcc_lo
global_store_b128 v[30:31], v[26:29], off offset:8
.LBB0_86:
s_or_b32 exec_lo, exec_lo, s1
v_cmp_gt_u64_e64 vcc_lo, s[6:7], 56
v_or_b32_e32 v21, 2, v0
s_lshl_b64 s[14:15], s[12:13], 12
v_lshlrev_b64 v[26:27], 6, v[24:25]
s_lshl_b32 s1, s8, 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_i32 s1, s1, 28
v_cndmask_b32_e32 v0, v21, v0, vcc_lo
s_waitcnt vmcnt(0)
v_add_co_u32 v18, vcc_lo, v18, s14
v_add_co_ci_u32_e32 v19, vcc_lo, s15, v19, vcc_lo
s_and_b32 s1, s1, 0x1e0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v18, vcc_lo, v18, v26
v_and_or_b32 v0, v0, 0xffffff1f, s1
v_add_co_ci_u32_e32 v19, vcc_lo, v19, v27, vcc_lo
s_clause 0x3
global_store_b128 v[18:19], v[0:3], off
global_store_b128 v[18:19], v[4:7], off offset:16
global_store_b128 v[18:19], v[8:11], off offset:32
global_store_b128 v[18:19], v[12:15], off offset:48
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_94
s_clause 0x1
global_load_b64 v[8:9], v25, s[2:3] offset:32 glc
global_load_b64 v[0:1], v25, s[2:3] offset:40
v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s11
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s14, v0
v_readfirstlane_b32 s15, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[14:15], s[14:15], s[10:11]
s_mul_i32 s15, s15, 24
s_mul_hi_u32 s16, s14, 24
s_mul_i32 s14, s14, 24
s_add_i32 s16, s16, s15
v_add_co_u32 v4, vcc_lo, v16, s14
v_add_co_ci_u32_e32 v5, vcc_lo, s16, v17, vcc_lo
s_mov_b32 s14, exec_lo
global_store_b64 v[4:5], v[8:9], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v25, v[6:9], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[2:3], v[8:9]
s_cbranch_execz .LBB0_90
s_mov_b32 s15, 0
.LBB0_89:
v_dual_mov_b32 v0, s10 :: v_dual_mov_b32 v1, s11
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[0:1], v25, v[0:3], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
s_or_b32 s15, vcc_lo, s15
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s15
s_cbranch_execnz .LBB0_89
.LBB0_90:
s_or_b32 exec_lo, exec_lo, s14
global_load_b64 v[0:1], v25, s[2:3] offset:16
s_mov_b32 s15, exec_lo
s_mov_b32 s14, exec_lo
v_mbcnt_lo_u32_b32 v2, s15, 0
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v2
s_cbranch_execz .LBB0_92
s_bcnt1_i32_b32 s15, s15
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v2, s15
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[0:1], v[2:3], off offset:8
.LBB0_92:
s_or_b32 exec_lo, exec_lo, s14
s_waitcnt vmcnt(0)
global_load_b64 v[2:3], v[0:1], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
s_cbranch_vccnz .LBB0_94
global_load_b32 v24, v[0:1], off offset:24
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s14, v24
s_waitcnt_vscnt null, 0x0
global_store_b64 v[2:3], v[24:25], off
s_and_b32 m0, s14, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_94:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s13, 24
s_mul_hi_u32 s13, s12, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s13, s13, s1
s_mul_i32 s1, s12, 24
v_add_co_u32 v0, vcc_lo, v16, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s13, v17, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_98
.p2align 6
.LBB0_95:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_97
s_sleep 1
s_cbranch_execnz .LBB0_98
s_branch .LBB0_100
.p2align 6
.LBB0_97:
s_branch .LBB0_100
.LBB0_98:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_95
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_95
.LBB0_100:
global_load_b64 v[0:1], v[18:19], off
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_28
s_clause 0x2
global_load_b64 v[4:5], v25, s[2:3] offset:40
global_load_b64 v[8:9], v25, s[2:3] offset:24 glc
global_load_b64 v[6:7], v25, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v10, vcc_lo, v4, 1
v_add_co_ci_u32_e32 v11, vcc_lo, 0, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, v10, s10
v_add_co_ci_u32_e32 v3, vcc_lo, s11, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
v_dual_cndmask_b32 v3, v3, v11 :: v_dual_cndmask_b32 v2, v2, v10
v_and_b32_e32 v5, v3, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_and_b32_e32 v4, v2, v4
v_mul_hi_u32 v10, v4, 24
v_mul_lo_u32 v4, v4, 24
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_u32 v6, vcc_lo, v6, v4
v_mov_b32_e32 v4, v8
v_mul_lo_u32 v5, v5, 24
v_add_nc_u32_e32 v5, v10, v5
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e32 v7, vcc_lo, v7, v5, vcc_lo
v_mov_b32_e32 v5, v9
global_store_b64 v[6:7], v[8:9], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v25, v[2:5], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[4:5], v[8:9]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_28
s_mov_b32 s0, 0
.LBB0_103:
s_sleep 1
global_store_b64 v[6:7], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[8:9], v25, v[2:5], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[8:9], v[4:5]
v_dual_mov_b32 v4, v8 :: v_dual_mov_b32 v5, v9
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_103
s_branch .LBB0_28
.LBB0_104:
s_mov_b32 s0, 0
.LBB0_105:
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 vcc_lo, exec_lo, s0
s_cbranch_vccz .LBB0_132
v_readfirstlane_b32 s0, v20
v_mov_b32_e32 v4, 0
v_mov_b32_e32 v5, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_eq_u32_e64 s0, s0, v20
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_112
s_waitcnt vmcnt(0)
v_mov_b32_e32 v0, 0
s_mov_b32 s4, exec_lo
global_load_b64 v[6:7], v0, s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[3:4], v0, s[2:3]
s_waitcnt vmcnt(1)
v_and_b32_e32 v1, v1, v6
v_and_b32_e32 v2, v2, v7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v5, v1, 24
v_mul_lo_u32 v2, v2, 24
v_mul_lo_u32 v1, v1, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, v5, v2
s_waitcnt vmcnt(0)
v_add_co_u32 v1, vcc_lo, v3, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, v4, v2, vcc_lo
global_load_b64 v[4:5], v[1:2], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[4:5], v0, v[4:7], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmpx_ne_u64_e64 v[4:5], v[6:7]
s_cbranch_execz .LBB0_111
s_mov_b32 s5, 0
.p2align 6
.LBB0_109:
s_sleep 1
s_clause 0x1
global_load_b64 v[1:2], v0, s[2:3] offset:40
global_load_b64 v[8:9], v0, s[2:3]
v_dual_mov_b32 v7, v5 :: v_dual_mov_b32 v6, v4
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_and_b32_e32 v1, v1, v6
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[3:4], null, v1, 24, v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_mov_b32 v1, v4 :: v_dual_and_b32 v2, v2, v7
v_mad_u64_u32 v[4:5], null, v2, 24, v[1:2]
global_load_b64 v[4:5], v[3:4], off glc
s_waitcnt vmcnt(0)
global_atomic_cmpswap_b64 v[4:5], v0, v[4:7], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[6:7]
s_or_b32 s5, vcc_lo, s5
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s5
s_cbranch_execnz .LBB0_109
s_or_b32 exec_lo, exec_lo, s5
.LBB0_111:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s4
.LBB0_112:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s1
v_mov_b32_e32 v21, 0
v_readfirstlane_b32 s4, v4
v_readfirstlane_b32 s5, v5
s_mov_b32 s8, exec_lo
s_clause 0x1
global_load_b64 v[6:7], v21, s[2:3] offset:40
global_load_b128 v[0:3], v21, s[2:3]
s_waitcnt vmcnt(1)
v_readfirstlane_b32 s6, v6
v_readfirstlane_b32 s7, v7
s_delay_alu instid0(VALU_DEP_1)
s_and_b64 s[6:7], s[4:5], s[6:7]
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_114
v_dual_mov_b32 v4, s8 :: v_dual_mov_b32 v5, 0
s_mul_i32 s8, s7, 24
s_mul_hi_u32 s9, s6, 24
v_dual_mov_b32 v6, 2 :: v_dual_mov_b32 v7, 1
s_add_i32 s9, s9, s8
s_mul_i32 s8, s6, 24
s_waitcnt vmcnt(0)
v_add_co_u32 v8, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v9, vcc_lo, s9, v1, vcc_lo
global_store_b128 v[8:9], v[4:7], off offset:8
.LBB0_114:
s_or_b32 exec_lo, exec_lo, s1
s_lshl_b64 s[8:9], s[6:7], 12
v_and_or_b32 v22, v22, 0xffffff1d, 34
s_waitcnt vmcnt(0)
v_add_co_u32 v4, vcc_lo, v2, s8
v_add_co_ci_u32_e32 v5, vcc_lo, s9, v3, vcc_lo
v_lshlrev_b64 v[2:3], 6, v[20:21]
s_mov_b32 s8, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_mov_b32 s9, s8
s_mov_b32 s10, s8
s_mov_b32 s11, s8
v_add_co_u32 v8, vcc_lo, v4, v2
v_mov_b32_e32 v6, 0
v_add_co_ci_u32_e32 v9, vcc_lo, v5, v3, vcc_lo
v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v5, s11
v_dual_mov_b32 v3, s9 :: v_dual_mov_b32 v4, s10
s_delay_alu instid0(VALU_DEP_4)
v_mov_b32_e32 v7, v6
s_clause 0x4
global_store_b64 v[8:9], v[22:23], off
global_store_b128 v[8:9], v[2:5], off offset:8
global_store_b128 v[8:9], v[2:5], off offset:24
global_store_b128 v[8:9], v[2:5], off offset:40
global_store_b64 v[8:9], v[6:7], off offset:56
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_122
v_dual_mov_b32 v8, 0 :: v_dual_mov_b32 v9, s4
v_mov_b32_e32 v10, s5
s_clause 0x1
global_load_b64 v[11:12], v8, s[2:3] offset:32 glc
global_load_b64 v[2:3], v8, s[2:3] offset:40
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
v_readfirstlane_b32 s9, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b64 s[8:9], s[8:9], s[4:5]
s_mul_i32 s9, s9, 24
s_mul_hi_u32 s10, s8, 24
s_mul_i32 s8, s8, 24
s_add_i32 s10, s10, s9
v_add_co_u32 v6, vcc_lo, v0, s8
v_add_co_ci_u32_e32 v7, vcc_lo, s10, v1, vcc_lo
s_mov_b32 s8, exec_lo
global_store_b64 v[6:7], v[11:12], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[4:5], v8, v[9:12], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmpx_ne_u64_e64 v[4:5], v[11:12]
s_cbranch_execz .LBB0_118
s_mov_b32 s9, 0
.LBB0_117:
v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
s_sleep 1
global_store_b64 v[6:7], v[4:5], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v8, v[2:5], s[2:3] offset:32 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
s_or_b32 s9, vcc_lo, s9
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_117
.LBB0_118:
s_or_b32 exec_lo, exec_lo, s8
v_mov_b32_e32 v2, 0
s_mov_b32 s9, exec_lo
s_mov_b32 s8, exec_lo
v_mbcnt_lo_u32_b32 v4, s9, 0
global_load_b64 v[2:3], v2, s[2:3] offset:16
v_cmpx_eq_u32_e32 0, v4
s_cbranch_execz .LBB0_120
s_bcnt1_i32_b32 s9, s9
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v4, s9
s_waitcnt vmcnt(0)
global_atomic_add_u64 v[2:3], v[4:5], off offset:8
.LBB0_120:
s_or_b32 exec_lo, exec_lo, s8
s_waitcnt vmcnt(0)
global_load_b64 v[4:5], v[2:3], off offset:16
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, 0, v[4:5]
s_cbranch_vccnz .LBB0_122
global_load_b32 v2, v[2:3], off offset:24
v_mov_b32_e32 v3, 0
s_waitcnt vmcnt(0)
v_readfirstlane_b32 s8, v2
s_waitcnt_vscnt null, 0x0
global_store_b64 v[4:5], v[2:3], off
s_and_b32 m0, s8, 0xff
s_sendmsg sendmsg(MSG_INTERRUPT)
.LBB0_122:
s_or_b32 exec_lo, exec_lo, s1
s_mul_i32 s1, s7, 24
s_mul_hi_u32 s7, s6, 24
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_add_i32 s7, s7, s1
s_mul_i32 s1, s6, 24
v_add_co_u32 v0, vcc_lo, v0, s1
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v0, 20
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_branch .LBB0_126
.p2align 6
.LBB0_123:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_readfirstlane_b32 s1, v2
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB0_125
s_sleep 1
s_cbranch_execnz .LBB0_126
s_branch .LBB0_128
.p2align 6
.LBB0_125:
s_branch .LBB0_128
.LBB0_126:
v_mov_b32_e32 v2, 1
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_123
global_load_b32 v2, v[0:1], off glc
s_waitcnt vmcnt(0)
buffer_gl1_inv
buffer_gl0_inv
v_and_b32_e32 v2, 1, v2
s_branch .LBB0_123
.LBB0_128:
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_132
v_mov_b32_e32 v6, 0
s_clause 0x2
global_load_b64 v[2:3], v6, s[2:3] offset:40
global_load_b64 v[7:8], v6, s[2:3] offset:24 glc
global_load_b64 v[4:5], v6, s[2:3]
s_waitcnt vmcnt(2)
v_add_co_u32 v9, vcc_lo, v2, 1
v_add_co_ci_u32_e32 v10, vcc_lo, 0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, v9, s4
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cmp_eq_u64_e32 vcc_lo, 0, v[0:1]
v_dual_cndmask_b32 v1, v1, v10 :: v_dual_cndmask_b32 v0, v0, v9
v_and_b32_e32 v3, v1, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_and_b32_e32 v2, v0, v2
v_mul_lo_u32 v3, v3, 24
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_hi_u32 v9, v2, 24
v_mul_lo_u32 v2, v2, 24
v_add_nc_u32_e32 v3, v9, v3
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, v4, v2
v_mov_b32_e32 v2, v7
v_add_co_ci_u32_e32 v5, vcc_lo, v5, v3, vcc_lo
v_mov_b32_e32 v3, v8
global_store_b64 v[4:5], v[7:8], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[2:3], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_ne_u64_e32 vcc_lo, v[2:3], v[7:8]
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_132
s_mov_b32 s0, 0
.LBB0_131:
s_sleep 1
global_store_b64 v[4:5], v[2:3], off
s_waitcnt_vscnt null, 0x0
global_atomic_cmpswap_b64 v[7:8], v6, v[0:3], s[2:3] offset:24 glc
s_waitcnt vmcnt(0)
v_cmp_eq_u64_e32 vcc_lo, v[7:8], v[2:3]
v_dual_mov_b32 v2, v7 :: v_dual_mov_b32 v3, v8
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_131
.LBB0_132:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9TheKernelv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 256
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 32
.amdhsa_next_free_sgpr 18
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9TheKernelv, .Lfunc_end0-_Z9TheKernelv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type .str,@object
.section .rodata.str1.1,"aMS",@progbits,1
.str:
.asciz "This is the kernel saying hello world, from the GPU.\n"
.size .str, 54
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: hidden_block_count_x
- .offset: 4
.size: 4
.value_kind: hidden_block_count_y
- .offset: 8
.size: 4
.value_kind: hidden_block_count_z
- .offset: 12
.size: 2
.value_kind: hidden_group_size_x
- .offset: 14
.size: 2
.value_kind: hidden_group_size_y
- .offset: 16
.size: 2
.value_kind: hidden_group_size_z
- .offset: 18
.size: 2
.value_kind: hidden_remainder_x
- .offset: 20
.size: 2
.value_kind: hidden_remainder_y
- .offset: 22
.size: 2
.value_kind: hidden_remainder_z
- .offset: 40
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 64
.size: 2
.value_kind: hidden_grid_dims
- .offset: 80
.size: 8
.value_kind: hidden_hostcall_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 256
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9TheKernelv
.private_segment_fixed_size: 0
.sgpr_count: 20
.sgpr_spill_count: 0
.symbol: _Z9TheKernelv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 32
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0016d86d_00000000-6_program_ia.cudafe1.cpp"
.text
#APP
#NO_APP
#-----------------------------------------------------------------------
# _ZL26__cudaUnregisterBinaryUtilv — atexit hook.
# Passes the module handle saved in _ZL20__cudaFatCubinHandle to
# __cudaUnregisterFatBinary, undoing the registration done at startup
# by _ZL24__sti____cudaRegisterAllv.
# ABI: SysV AMD64. No arguments, no meaningful return value.
#-----------------------------------------------------------------------
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp # 8-byte adjustment keeps %rsp 16-aligned at the call
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi # arg0 = saved fatbin handle
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
#-----------------------------------------------------------------------
# _Z27__device_stub__Z9TheKernelvv — host-side launch stub for the
# zero-argument kernel TheKernel().
# Retrieves the launch configuration previously pushed by
# __cudaPushCallConfiguration (grid/block dims, shared-mem size, stream)
# via __cudaPopCallConfiguration, then forwards it to cudaLaunchKernel
# with _Z9TheKernelv as the function handle and no kernel arguments.
# A stack-protector canary (%fs:40) guards the local config buffers.
# ABI: SysV AMD64. No arguments, no return value.
#-----------------------------------------------------------------------
.globl _Z27__device_stub__Z9TheKernelvv
.type _Z27__device_stub__Z9TheKernelvv, @function
_Z27__device_stub__Z9TheKernelvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp # locals: grid/block dims, shm size, stream, canary
.cfi_def_cfa_offset 96
movq %fs:40, %rax # load stack canary
movq %rax, 72(%rsp) # store canary above the locals
xorl %eax, %eax
movl $1, 16(%rsp) # default the six dim3 fields to 1 before the pop
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx # out: stream
movq %rsp, %rdx # out: shared-mem size
leaq 28(%rsp), %rsi # out: blockDim
leaq 16(%rsp), %rdi # out: gridDim
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7 # 0 => configuration available, do the launch
.L3:
movq 72(%rsp), %rax # verify stack canary before returning
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp) # stack args for cudaLaunchKernel: stream
.cfi_def_cfa_offset 104
pushq 8(%rsp) # ... and shared-mem size
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9 # arg5 = kernel-args array (empty; kernel takes none)
movq 44(%rsp), %rcx # arg3 = blockDim (x,y packed)
movl 52(%rsp), %r8d # arg4 = blockDim.z
movq 32(%rsp), %rsi # arg1 = gridDim (x,y packed)
movl 40(%rsp), %edx # arg2 = gridDim.z
leaq _Z9TheKernelv(%rip), %rdi # arg0 = kernel function handle
call cudaLaunchKernel@PLT
addq $16, %rsp # drop the two pushed stack args
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT # canary mismatch: abort
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z9TheKernelvv, .-_Z27__device_stub__Z9TheKernelvv
#-----------------------------------------------------------------------
# _Z9TheKernelv — host-side symbol standing in for the kernel TheKernel().
# Calling it simply forwards to the launch stub; its address is also what
# __cudaRegisterFunction and cudaLaunchKernel use to identify the kernel.
# ABI: SysV AMD64. No arguments, no return value.
#-----------------------------------------------------------------------
.globl _Z9TheKernelv
.type _Z9TheKernelv, @function
_Z9TheKernelv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp # realign stack to 16 bytes for the call
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z9TheKernelvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9TheKernelv, .-_Z9TheKernelv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "This is the C layer saying hello world, from the host.\n"
.text
#-----------------------------------------------------------------------
# main — program entry point (CUDA host side).
# Prints the .LC0 banner, pushes a <<<1,1,1 / 1,1,1, shm=0, stream=0>>>
# launch configuration, invokes the kernel stub if the push succeeded,
# waits with cudaDeviceSynchronize, and returns 0.
# ABI: SysV AMD64.
#-----------------------------------------------------------------------
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp # locals for the two dim3 values + alignment
.cfi_def_cfa_offset 48
leaq .LC0(%rip), %rsi # format string (host hello-world banner)
movl $2, %edi # _FORTIFY_SOURCE flag for __printf_chk
movl $0, %eax # %al = 0: no vector args to the variadic call
call __printf_chk@PLT
movl $1, 20(%rsp) # build gridDim = (1,1) ...
movl $1, 24(%rsp)
movl $1, 8(%rsp) # ... and blockDim = (1,1) in locals
movl $1, 12(%rsp)
movl $0, %r9d # stream = 0
movl $0, %r8d # shared-mem size = 0
movq 20(%rsp), %rdx # blockDim x,y packed into one 64-bit arg
movl $1, %ecx # blockDim.z = 1
movq 8(%rsp), %rdi # gridDim x,y packed
movl $1, %esi # gridDim.z = 1
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14 # 0 => config accepted, launch the kernel
.L12:
call cudaDeviceSynchronize@PLT # block until the device finishes
movl $0, %eax # return 0
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
call _Z27__device_stub__Z9TheKernelvv # perform the launch
jmp .L12
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z9TheKernelv"
.text
#-----------------------------------------------------------------------
# _ZL24__sti____cudaRegisterAllv — static initializer (run via
# .init_array before main). Registers the embedded fatbin with the CUDA
# runtime, saves the returned handle, registers the kernel symbol
# _Z9TheKernelv under its mangled name (.LC1), finalizes registration,
# and installs _ZL26__cudaUnregisterBinaryUtilv as an atexit cleanup.
# ABI: SysV AMD64. No arguments, no return value.
#-----------------------------------------------------------------------
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp # realign stack to 16 bytes
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi # arg0 = fatbin wrapper descriptor
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi # handle is both next arg and saved state
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0 # four NULL trailing args for __cudaRegisterFunction
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d # thread_limit = 0
movl $-1, %r8d # -1: no explicit thread limit selector
leaq .LC1(%rip), %rdx # device-side name "_Z9TheKernelv"
movq %rdx, %rcx # same string for the deviceFun name arg
leaq _Z9TheKernelv(%rip), %rsi # host function address used as the key
call __cudaRegisterFunction@PLT
addq $32, %rsp # drop the four pushed NULLs
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT # finalize registration
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT # unregister at process exit
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "program_ia.hip"
#-----------------------------------------------------------------------
# _Z24__device_stub__TheKernelv — HIP host-side launch stub for the
# zero-argument kernel TheKernel().
# Pops the launch configuration stored by __hipPushCallConfiguration
# (grid dims, block dims, shared-mem size, stream) into stack locals,
# then forwards it to hipLaunchKernel with _Z9TheKernelv as the handle.
# NOTE: non-PIC absolute address load (movl $_Z9TheKernelv) — this
# object was built non-PIE.
# ABI: SysV AMD64. No arguments, no return value.
#-----------------------------------------------------------------------
.globl _Z24__device_stub__TheKernelv # -- Begin function _Z24__device_stub__TheKernelv
.p2align 4, 0x90
.type _Z24__device_stub__TheKernelv,@function
_Z24__device_stub__TheKernelv: # @_Z24__device_stub__TheKernelv
.cfi_startproc
# %bb.0:
subq $56, %rsp # locals for popped config + kernel-arg array
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi # out: gridDim
leaq 16(%rsp), %rsi # out: blockDim
leaq 8(%rsp), %rdx # out: shared-mem size
movq %rsp, %rcx # out: stream
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi # arg1 = gridDim (x,y packed)
movl 40(%rsp), %edx # arg2 = gridDim.z
movq 16(%rsp), %rcx # arg3 = blockDim (x,y packed)
movl 24(%rsp), %r8d # arg4 = blockDim.z
leaq 48(%rsp), %r9 # arg5 = kernel-args array (kernel takes none)
movl $_Z9TheKernelv, %edi # arg0 = kernel function handle
pushq (%rsp) # stack arg: stream
.cfi_adjust_cfa_offset 8
pushq 16(%rsp) # stack arg: shared-mem size
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp # drop pushed args + locals in one step
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z24__device_stub__TheKernelv, .Lfunc_end0-_Z24__device_stub__TheKernelv
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# main — program entry point (HIP host side).
# Prints the .Lstr banner via puts, pushes a 1x1x1 grid / 1x1x1 block
# launch configuration (shm=0, stream=0); on success pops it back and
# calls hipLaunchKernel inline (the stub body is duplicated here rather
# than calling _Z24__device_stub__TheKernelv). Ends with
# hipDeviceSynchronize and returns 0.
# ABI: SysV AMD64.
#-----------------------------------------------------------------------
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movl $.Lstr, %edi # host hello-world banner (printf lowered to puts)
callq puts@PLT
movabsq $4294967297, %rdi # imm = 0x100000001
# ^ gridDim x=1,y=1 packed into one 64-bit value
movl $1, %esi # gridDim.z = 1
movq %rdi, %rdx # blockDim x=1,y=1 (same packed value)
movl $1, %ecx # blockDim.z = 1
xorl %r8d, %r8d # shared-mem size = 0
xorl %r9d, %r9d # stream = 0
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2 # nonzero => push failed, skip the launch
# %bb.1:
leaq 32(%rsp), %rdi # out: gridDim
leaq 16(%rsp), %rsi # out: blockDim
leaq 8(%rsp), %rdx # out: shared-mem size
movq %rsp, %rcx # out: stream
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi # arg1 = gridDim (x,y packed)
movl 40(%rsp), %edx # arg2 = gridDim.z
movq 16(%rsp), %rcx # arg3 = blockDim (x,y packed)
movl 24(%rsp), %r8d # arg4 = blockDim.z
leaq 48(%rsp), %r9 # arg5 = kernel-args array (empty)
movl $_Z9TheKernelv, %edi # arg0 = kernel function handle
pushq (%rsp) # stack arg: stream
.cfi_adjust_cfa_offset 8
pushq 16(%rsp) # stack arg: shared-mem size
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp # drop the two pushed stack args
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize # block until the device finishes
xorl %eax, %eax # return 0
addq $56, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# __hip_module_ctor — module constructor (run via .init_array).
# If __hip_gpubin_handle is still NULL, registers the embedded HIP
# fatbin (__hip_fatbin_wrapper) and caches the handle; then registers
# the kernel symbol _Z9TheKernelv under its mangled name (.L__unnamed_1)
# and installs __hip_module_dtor via atexit (tail call).
# NOTE: the NULL check is not thread-safe; dynamic loaders run
# .init_array single-threaded, so this is fine in practice.
# ABI: SysV AMD64. No arguments, no return value.
#-----------------------------------------------------------------------
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp # space for the four zeroed stack args below
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip) # already registered?
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi # arg0 = fatbin wrapper descriptor
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip) # cache the module handle
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi # arg0 = module handle
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp) # zero the four trailing pointer args
movups %xmm0, (%rsp)
movl $_Z9TheKernelv, %esi # host function address used as the key
movl $.L__unnamed_1, %edx # device-side name "_Z9TheKernelv"
movl $.L__unnamed_1, %ecx # same string for the deviceFun name arg
movl $-1, %r8d # -1: no explicit thread limit
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi # register cleanup at process exit
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
#-----------------------------------------------------------------------
# __hip_module_dtor — atexit cleanup installed by __hip_module_ctor.
# If a fatbin was registered (handle non-NULL), unregisters it and
# clears __hip_gpubin_handle so the unregistration happens only once.
# ABI: SysV AMD64. No arguments, no return value.
#-----------------------------------------------------------------------
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi # arg0 = cached module handle
testq %rdi, %rdi
je .LBB3_2 # nothing registered: return
# %bb.1:
pushq %rax # realign stack to 16 bytes for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip) # mark as unregistered
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9TheKernelv,@object # @_Z9TheKernelv
.section .rodata,"a",@progbits
.globl _Z9TheKernelv
.p2align 3, 0x0
_Z9TheKernelv:
.quad _Z24__device_stub__TheKernelv
.size _Z9TheKernelv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9TheKernelv"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "This is the C layer saying hello world, from the host."
.size .Lstr, 55
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__TheKernelv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9TheKernelv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | ///*
// * To change this license header, choose License Headers in Project Properties.
// * To change this template file, choose Tools | Templates
// * and open the template in the editor.
// */
//
///*
// * File: LAR_General.h
// * Author: joseph
// *
// * Created on July 23, 2017, 3:24 PM
// */
//
//#include "BLACKCAT_GPU_MATHEMATICS.cuh"
//
////unsigned
////__global__ void GPU_MATHEMATICS::initialize(unsigned*& d, unsigned sz) {
//// d = new unsigned[sz];
////}
////
////unsigned
////__global__ void GPU_MATHEMATICS::destruction(unsigned * d) {
//// delete [] d;
////}
//
//
//__global__ void GPU_MATHEMATICS::copy(unsigned * store, const unsigned * v, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// store[i] = v[i];
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::fill(unsigned * m, unsigned f, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// m[i] = f;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::transpose(unsigned * s, unsigned s_ld, const unsigned * m, unsigned rows, unsigned cols, unsigned m_ld) {
//
// for (unsigned r = 0; r < rows; ++r) {
// for (unsigned c= 0; c< cols; ++c) {
// s[r * s_ld + c] = m[c * m_ld + r];
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::max(unsigned* max_val, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*max_val < data[i]) {
// *max_val = data[i];
// }
// }
// } else {
//
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// max<<<256,256>>>(max_val, &data[i * ld[order-1]], ranks, ld, order-1);
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::min(unsigned* min_val, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*min_val < data[i]) {
// *min_val = data[i];
// }
// }
// } else {
//
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// min<<<256,256>>>(min_val, &data[i * ld[order-1]], ranks, ld, order-1);
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::max_index(unsigned* max_val, unsigned* max_indexes, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*max_val < data[i]) {
// *max_val = data[i];
// *max_indexes = i;
// }
// }
// } else {
//
// unsigned curr_max;
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// curr_max = *max_val;
// max_index<<<256,256>>>(max_val, max_indexes, &data[i * ld[order-1]], ranks, ld, order-1);
//
// //if modified -- update
// if (curr_max != *max_val) {
// max_indexes[order-1] = i;
// }
// }
// }
//}
//
//
//
//__global__ void GPU_MATHEMATICS::min_index(unsigned* min_val, unsigned* min_indexes, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*min_val < data[i]) {
// *min_val = data[i];
// *min_indexes = i;
// }
// }
// } else {
//
// unsigned curr_max;
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// curr_max = *min_val;
// min_index<<<256,256>>>(min_val, min_indexes, &data[i * ld[order-1]], ranks, ld, order-1);
//
// //if modified -- update
// if (curr_max != *min_val) {
// min_indexes[order-1] = i;
// }
// }
// }
//}
//
//
//
//
//__global__ void GPU_MATHEMATICS::randomize(unsigned * m, unsigned lower_bound, unsigned upper_bound, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// m[i] = i % 2 == 0 ? 3 : -3 ;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::randomize(unsigned * m, unsigned* ld, unsigned* ranks, unsigned order, unsigned lower_bound, unsigned upper_bound) {
// --order;
// if (order == 0) {
// randomize<<<256,256>>>(m, lower_bound, upper_bound , ranks[order]);
// } else {
// for (unsigned i = 0; i < ranks[order]; ++i) {
// randomize<<<256,256>>>(&m[ld[order] * i], ld, ranks, order, lower_bound, upper_bound);
// }
// }
//}
//
//void GPU_MATHEMATICS::print(const unsigned* ary, unsigned sz) {
// for (unsigned i = 0; i < sz; ++i) {
// std::cout << ary[i] << " " << std::endl;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::print(const unsigned* ary, const unsigned* dims, const unsigned* lead_dims, unsigned index) {
//
//
//// if (index < 3) {
//// for (unsigned r = 0; r < dims[0]; ++r) {
////
//// if (r != 0)
//// std::cout << std::endl;
////
//// for (unsigned c = 0; c< dims[1]; ++c) {
//// auto str =std::to_string(ary[r + c * lead_dims[index - 1]]);
//// str = str.substr(0, str.length() < 3 ? str.length() : 3);
//// std::cout << str << " ";
//// }
//// }
//// std::cout << "]" << std::endl << std::endl;
////
//// } else {
//// std::cout << "[";
//// for (unsigned i = 0; i < dims[index - 1]; ++i) {
//// print(&ary[i * lead_dims[index - 1]], dims, lead_dims, index - 1);
//// }
//// }
//} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | ///*
// * To change this license header, choose License Headers in Project Properties.
// * To change this template file, choose Tools | Templates
// * and open the template in the editor.
// */
//
///*
// * File: LAR_General.h
// * Author: joseph
// *
// * Created on July 23, 2017, 3:24 PM
// */
//
//#include "BLACKCAT_GPU_MATHEMATICS.cuh"
//
////unsigned
////__global__ void GPU_MATHEMATICS::initialize(unsigned*& d, unsigned sz) {
//// d = new unsigned[sz];
////}
////
////unsigned
////__global__ void GPU_MATHEMATICS::destruction(unsigned * d) {
//// delete [] d;
////}
//
//
//__global__ void GPU_MATHEMATICS::copy(unsigned * store, const unsigned * v, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// store[i] = v[i];
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::fill(unsigned * m, unsigned f, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// m[i] = f;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::transpose(unsigned * s, unsigned s_ld, const unsigned * m, unsigned rows, unsigned cols, unsigned m_ld) {
//
// for (unsigned r = 0; r < rows; ++r) {
// for (unsigned c= 0; c< cols; ++c) {
// s[r * s_ld + c] = m[c * m_ld + r];
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::max(unsigned* max_val, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*max_val < data[i]) {
// *max_val = data[i];
// }
// }
// } else {
//
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// max<<<256,256>>>(max_val, &data[i * ld[order-1]], ranks, ld, order-1);
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::min(unsigned* min_val, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*min_val < data[i]) {
// *min_val = data[i];
// }
// }
// } else {
//
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// min<<<256,256>>>(min_val, &data[i * ld[order-1]], ranks, ld, order-1);
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::max_index(unsigned* max_val, unsigned* max_indexes, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*max_val < data[i]) {
// *max_val = data[i];
// *max_indexes = i;
// }
// }
// } else {
//
// unsigned curr_max;
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// curr_max = *max_val;
// max_index<<<256,256>>>(max_val, max_indexes, &data[i * ld[order-1]], ranks, ld, order-1);
//
// //if modified -- update
// if (curr_max != *max_val) {
// max_indexes[order-1] = i;
// }
// }
// }
//}
//
//
//
//__global__ void GPU_MATHEMATICS::min_index(unsigned* min_val, unsigned* min_indexes, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*min_val < data[i]) {
// *min_val = data[i];
// *min_indexes = i;
// }
// }
// } else {
//
// unsigned curr_max;
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// curr_max = *min_val;
// min_index<<<256,256>>>(min_val, min_indexes, &data[i * ld[order-1]], ranks, ld, order-1);
//
// //if modified -- update
// if (curr_max != *min_val) {
// min_indexes[order-1] = i;
// }
// }
// }
//}
//
//
//
//
//__global__ void GPU_MATHEMATICS::randomize(unsigned * m, unsigned lower_bound, unsigned upper_bound, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// m[i] = i % 2 == 0 ? 3 : -3 ;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::randomize(unsigned * m, unsigned* ld, unsigned* ranks, unsigned order, unsigned lower_bound, unsigned upper_bound) {
// --order;
// if (order == 0) {
// randomize<<<256,256>>>(m, lower_bound, upper_bound , ranks[order]);
// } else {
// for (unsigned i = 0; i < ranks[order]; ++i) {
// randomize<<<256,256>>>(&m[ld[order] * i], ld, ranks, order, lower_bound, upper_bound);
// }
// }
//}
//
//void GPU_MATHEMATICS::print(const unsigned* ary, unsigned sz) {
// for (unsigned i = 0; i < sz; ++i) {
// std::cout << ary[i] << " " << std::endl;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::print(const unsigned* ary, const unsigned* dims, const unsigned* lead_dims, unsigned index) {
//
//
//// if (index < 3) {
//// for (unsigned r = 0; r < dims[0]; ++r) {
////
//// if (r != 0)
//// std::cout << std::endl;
////
//// for (unsigned c = 0; c< dims[1]; ++c) {
//// auto str =std::to_string(ary[r + c * lead_dims[index - 1]]);
//// str = str.substr(0, str.length() < 3 ? str.length() : 3);
//// std::cout << str << " ";
//// }
//// }
//// std::cout << "]" << std::endl << std::endl;
////
//// } else {
//// std::cout << "[";
//// for (unsigned i = 0; i < dims[index - 1]; ++i) {
//// print(&ary[i * lead_dims[index - 1]], dims, lead_dims, index - 1);
//// }
//// }
//} | .file "tmpxft_0002d29f_00000000-6_LAR_General_CUDA.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | ///*
// * To change this license header, choose License Headers in Project Properties.
// * To change this template file, choose Tools | Templates
// * and open the template in the editor.
// */
//
///*
// * File: LAR_General.h
// * Author: joseph
// *
// * Created on July 23, 2017, 3:24 PM
// */
//
//#include "BLACKCAT_GPU_MATHEMATICS.cuh"
//
////unsigned
////__global__ void GPU_MATHEMATICS::initialize(unsigned*& d, unsigned sz) {
//// d = new unsigned[sz];
////}
////
////unsigned
////__global__ void GPU_MATHEMATICS::destruction(unsigned * d) {
//// delete [] d;
////}
//
//
//__global__ void GPU_MATHEMATICS::copy(unsigned * store, const unsigned * v, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// store[i] = v[i];
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::fill(unsigned * m, unsigned f, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// m[i] = f;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::transpose(unsigned * s, unsigned s_ld, const unsigned * m, unsigned rows, unsigned cols, unsigned m_ld) {
//
// for (unsigned r = 0; r < rows; ++r) {
// for (unsigned c= 0; c< cols; ++c) {
// s[r * s_ld + c] = m[c * m_ld + r];
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::max(unsigned* max_val, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*max_val < data[i]) {
// *max_val = data[i];
// }
// }
// } else {
//
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// max<<<256,256>>>(max_val, &data[i * ld[order-1]], ranks, ld, order-1);
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::min(unsigned* min_val, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*min_val < data[i]) {
// *min_val = data[i];
// }
// }
// } else {
//
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// min<<<256,256>>>(min_val, &data[i * ld[order-1]], ranks, ld, order-1);
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::max_index(unsigned* max_val, unsigned* max_indexes, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*max_val < data[i]) {
// *max_val = data[i];
// *max_indexes = i;
// }
// }
// } else {
//
// unsigned curr_max;
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// curr_max = *max_val;
// max_index<<<256,256>>>(max_val, max_indexes, &data[i * ld[order-1]], ranks, ld, order-1);
//
// //if modified -- update
// if (curr_max != *max_val) {
// max_indexes[order-1] = i;
// }
// }
// }
//}
//
//
//
//__global__ void GPU_MATHEMATICS::min_index(unsigned* min_val, unsigned* min_indexes, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*min_val < data[i]) {
// *min_val = data[i];
// *min_indexes = i;
// }
// }
// } else {
//
// unsigned curr_max;
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// curr_max = *min_val;
// min_index<<<256,256>>>(min_val, min_indexes, &data[i * ld[order-1]], ranks, ld, order-1);
//
// //if modified -- update
// if (curr_max != *min_val) {
// min_indexes[order-1] = i;
// }
// }
// }
//}
//
//
//
//
//__global__ void GPU_MATHEMATICS::randomize(unsigned * m, unsigned lower_bound, unsigned upper_bound, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// m[i] = i % 2 == 0 ? 3 : -3 ;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::randomize(unsigned * m, unsigned* ld, unsigned* ranks, unsigned order, unsigned lower_bound, unsigned upper_bound) {
// --order;
// if (order == 0) {
// randomize<<<256,256>>>(m, lower_bound, upper_bound , ranks[order]);
// } else {
// for (unsigned i = 0; i < ranks[order]; ++i) {
// randomize<<<256,256>>>(&m[ld[order] * i], ld, ranks, order, lower_bound, upper_bound);
// }
// }
//}
//
//void GPU_MATHEMATICS::print(const unsigned* ary, unsigned sz) {
// for (unsigned i = 0; i < sz; ++i) {
// std::cout << ary[i] << " " << std::endl;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::print(const unsigned* ary, const unsigned* dims, const unsigned* lead_dims, unsigned index) {
//
//
//// if (index < 3) {
//// for (unsigned r = 0; r < dims[0]; ++r) {
////
//// if (r != 0)
//// std::cout << std::endl;
////
//// for (unsigned c = 0; c< dims[1]; ++c) {
//// auto str =std::to_string(ary[r + c * lead_dims[index - 1]]);
//// str = str.substr(0, str.length() < 3 ? str.length() : 3);
//// std::cout << str << " ";
//// }
//// }
//// std::cout << "]" << std::endl << std::endl;
////
//// } else {
//// std::cout << "[";
//// for (unsigned i = 0; i < dims[index - 1]; ++i) {
//// print(&ary[i * lead_dims[index - 1]], dims, lead_dims, index - 1);
//// }
//// }
//} | #include <hip/hip_runtime.h>
///*
// * To change this license header, choose License Headers in Project Properties.
// * To change this template file, choose Tools | Templates
// * and open the template in the editor.
// */
//
///*
// * File: LAR_General.h
// * Author: joseph
// *
// * Created on July 23, 2017, 3:24 PM
// */
//
//#include "BLACKCAT_GPU_MATHEMATICS.cuh"
//
////unsigned
////__global__ void GPU_MATHEMATICS::initialize(unsigned*& d, unsigned sz) {
//// d = new unsigned[sz];
////}
////
////unsigned
////__global__ void GPU_MATHEMATICS::destruction(unsigned * d) {
//// delete [] d;
////}
//
//
//__global__ void GPU_MATHEMATICS::copy(unsigned * store, const unsigned * v, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// store[i] = v[i];
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::fill(unsigned * m, unsigned f, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// m[i] = f;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::transpose(unsigned * s, unsigned s_ld, const unsigned * m, unsigned rows, unsigned cols, unsigned m_ld) {
//
// for (unsigned r = 0; r < rows; ++r) {
// for (unsigned c= 0; c< cols; ++c) {
// s[r * s_ld + c] = m[c * m_ld + r];
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::max(unsigned* max_val, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*max_val < data[i]) {
// *max_val = data[i];
// }
// }
// } else {
//
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// max<<<256,256>>>(max_val, &data[i * ld[order-1]], ranks, ld, order-1);
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::min(unsigned* min_val, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*min_val < data[i]) {
// *min_val = data[i];
// }
// }
// } else {
//
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// min<<<256,256>>>(min_val, &data[i * ld[order-1]], ranks, ld, order-1);
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::max_index(unsigned* max_val, unsigned* max_indexes, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*max_val < data[i]) {
// *max_val = data[i];
// *max_indexes = i;
// }
// }
// } else {
//
// unsigned curr_max;
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// curr_max = *max_val;
// max_index<<<256,256>>>(max_val, max_indexes, &data[i * ld[order-1]], ranks, ld, order-1);
//
// //if modified -- update
// if (curr_max != *max_val) {
// max_indexes[order-1] = i;
// }
// }
// }
//}
//
//
//
//__global__ void GPU_MATHEMATICS::min_index(unsigned* min_val, unsigned* min_indexes, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*min_val < data[i]) {
// *min_val = data[i];
// *min_indexes = i;
// }
// }
// } else {
//
// unsigned curr_max;
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// curr_max = *min_val;
// min_index<<<256,256>>>(min_val, min_indexes, &data[i * ld[order-1]], ranks, ld, order-1);
//
// //if modified -- update
// if (curr_max != *min_val) {
// min_indexes[order-1] = i;
// }
// }
// }
//}
//
//
//
//
//__global__ void GPU_MATHEMATICS::randomize(unsigned * m, unsigned lower_bound, unsigned upper_bound, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// m[i] = i % 2 == 0 ? 3 : -3 ;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::randomize(unsigned * m, unsigned* ld, unsigned* ranks, unsigned order, unsigned lower_bound, unsigned upper_bound) {
// --order;
// if (order == 0) {
// randomize<<<256,256>>>(m, lower_bound, upper_bound , ranks[order]);
// } else {
// for (unsigned i = 0; i < ranks[order]; ++i) {
// randomize<<<256,256>>>(&m[ld[order] * i], ld, ranks, order, lower_bound, upper_bound);
// }
// }
//}
//
//void GPU_MATHEMATICS::print(const unsigned* ary, unsigned sz) {
// for (unsigned i = 0; i < sz; ++i) {
// std::cout << ary[i] << " " << std::endl;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::print(const unsigned* ary, const unsigned* dims, const unsigned* lead_dims, unsigned index) {
//
//
//// if (index < 3) {
//// for (unsigned r = 0; r < dims[0]; ++r) {
////
//// if (r != 0)
//// std::cout << std::endl;
////
//// for (unsigned c = 0; c< dims[1]; ++c) {
//// auto str =std::to_string(ary[r + c * lead_dims[index - 1]]);
//// str = str.substr(0, str.length() < 3 ? str.length() : 3);
//// std::cout << str << " ";
//// }
//// }
//// std::cout << "]" << std::endl << std::endl;
////
//// } else {
//// std::cout << "[";
//// for (unsigned i = 0; i < dims[index - 1]; ++i) {
//// print(&ary[i * lead_dims[index - 1]], dims, lead_dims, index - 1);
//// }
//// }
//} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
///*
// * To change this license header, choose License Headers in Project Properties.
// * To change this template file, choose Tools | Templates
// * and open the template in the editor.
// */
//
///*
// * File: LAR_General.h
// * Author: joseph
// *
// * Created on July 23, 2017, 3:24 PM
// */
//
//#include "BLACKCAT_GPU_MATHEMATICS.cuh"
//
////unsigned
////__global__ void GPU_MATHEMATICS::initialize(unsigned*& d, unsigned sz) {
//// d = new unsigned[sz];
////}
////
////unsigned
////__global__ void GPU_MATHEMATICS::destruction(unsigned * d) {
//// delete [] d;
////}
//
//
//__global__ void GPU_MATHEMATICS::copy(unsigned * store, const unsigned * v, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// store[i] = v[i];
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::fill(unsigned * m, unsigned f, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// m[i] = f;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::transpose(unsigned * s, unsigned s_ld, const unsigned * m, unsigned rows, unsigned cols, unsigned m_ld) {
//
// for (unsigned r = 0; r < rows; ++r) {
// for (unsigned c= 0; c< cols; ++c) {
// s[r * s_ld + c] = m[c * m_ld + r];
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::max(unsigned* max_val, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*max_val < data[i]) {
// *max_val = data[i];
// }
// }
// } else {
//
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// max<<<256,256>>>(max_val, &data[i * ld[order-1]], ranks, ld, order-1);
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::min(unsigned* min_val, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*min_val < data[i]) {
// *min_val = data[i];
// }
// }
// } else {
//
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// min<<<256,256>>>(min_val, &data[i * ld[order-1]], ranks, ld, order-1);
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::max_index(unsigned* max_val, unsigned* max_indexes, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*max_val < data[i]) {
// *max_val = data[i];
// *max_indexes = i;
// }
// }
// } else {
//
// unsigned curr_max;
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// curr_max = *max_val;
// max_index<<<256,256>>>(max_val, max_indexes, &data[i * ld[order-1]], ranks, ld, order-1);
//
// //if modified -- update
// if (curr_max != *max_val) {
// max_indexes[order-1] = i;
// }
// }
// }
//}
//
//
//
//__global__ void GPU_MATHEMATICS::min_index(unsigned* min_val, unsigned* min_indexes, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*min_val < data[i]) {
// *min_val = data[i];
// *min_indexes = i;
// }
// }
// } else {
//
// unsigned curr_max;
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// curr_max = *min_val;
// min_index<<<256,256>>>(min_val, min_indexes, &data[i * ld[order-1]], ranks, ld, order-1);
//
// //if modified -- update
// if (curr_max != *min_val) {
// min_indexes[order-1] = i;
// }
// }
// }
//}
//
//
//
//
//__global__ void GPU_MATHEMATICS::randomize(unsigned * m, unsigned lower_bound, unsigned upper_bound, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// m[i] = i % 2 == 0 ? 3 : -3 ;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::randomize(unsigned * m, unsigned* ld, unsigned* ranks, unsigned order, unsigned lower_bound, unsigned upper_bound) {
// --order;
// if (order == 0) {
// randomize<<<256,256>>>(m, lower_bound, upper_bound , ranks[order]);
// } else {
// for (unsigned i = 0; i < ranks[order]; ++i) {
// randomize<<<256,256>>>(&m[ld[order] * i], ld, ranks, order, lower_bound, upper_bound);
// }
// }
//}
//
//void GPU_MATHEMATICS::print(const unsigned* ary, unsigned sz) {
// for (unsigned i = 0; i < sz; ++i) {
// std::cout << ary[i] << " " << std::endl;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::print(const unsigned* ary, const unsigned* dims, const unsigned* lead_dims, unsigned index) {
//
//
//// if (index < 3) {
//// for (unsigned r = 0; r < dims[0]; ++r) {
////
//// if (r != 0)
//// std::cout << std::endl;
////
//// for (unsigned c = 0; c< dims[1]; ++c) {
//// auto str =std::to_string(ary[r + c * lead_dims[index - 1]]);
//// str = str.substr(0, str.length() < 3 ? str.length() : 3);
//// std::cout << str << " ";
//// }
//// }
//// std::cout << "]" << std::endl << std::endl;
////
//// } else {
//// std::cout << "[";
//// for (unsigned i = 0; i < dims[index - 1]; ++i) {
//// print(&ary[i * lead_dims[index - 1]], dims, lead_dims, index - 1);
//// }
//// }
//} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
///*
// * To change this license header, choose License Headers in Project Properties.
// * To change this template file, choose Tools | Templates
// * and open the template in the editor.
// */
//
///*
// * File: LAR_General.h
// * Author: joseph
// *
// * Created on July 23, 2017, 3:24 PM
// */
//
//#include "BLACKCAT_GPU_MATHEMATICS.cuh"
//
////unsigned
////__global__ void GPU_MATHEMATICS::initialize(unsigned*& d, unsigned sz) {
//// d = new unsigned[sz];
////}
////
////unsigned
////__global__ void GPU_MATHEMATICS::destruction(unsigned * d) {
//// delete [] d;
////}
//
//
//__global__ void GPU_MATHEMATICS::copy(unsigned * store, const unsigned * v, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// store[i] = v[i];
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::fill(unsigned * m, unsigned f, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// m[i] = f;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::transpose(unsigned * s, unsigned s_ld, const unsigned * m, unsigned rows, unsigned cols, unsigned m_ld) {
//
// for (unsigned r = 0; r < rows; ++r) {
// for (unsigned c= 0; c< cols; ++c) {
// s[r * s_ld + c] = m[c * m_ld + r];
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::max(unsigned* max_val, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*max_val < data[i]) {
// *max_val = data[i];
// }
// }
// } else {
//
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// max<<<256,256>>>(max_val, &data[i * ld[order-1]], ranks, ld, order-1);
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::min(unsigned* min_val, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*min_val < data[i]) {
// *min_val = data[i];
// }
// }
// } else {
//
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// min<<<256,256>>>(min_val, &data[i * ld[order-1]], ranks, ld, order-1);
// }
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::max_index(unsigned* max_val, unsigned* max_indexes, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*max_val < data[i]) {
// *max_val = data[i];
// *max_indexes = i;
// }
// }
// } else {
//
// unsigned curr_max;
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// curr_max = *max_val;
// max_index<<<256,256>>>(max_val, max_indexes, &data[i * ld[order-1]], ranks, ld, order-1);
//
// //if modified -- update
// if (curr_max != *max_val) {
// max_indexes[order-1] = i;
// }
// }
// }
//}
//
//
//
//__global__ void GPU_MATHEMATICS::min_index(unsigned* min_val, unsigned* min_indexes, const unsigned* data, const unsigned* ranks, const unsigned* ld, unsigned order) {
//
// if (order == 1) {
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// if (*min_val < data[i]) {
// *min_val = data[i];
// *min_indexes = i;
// }
// }
// } else {
//
// unsigned curr_max;
// for (unsigned i = 0; i < ranks[order-1]; ++i) {
// curr_max = *min_val;
// min_index<<<256,256>>>(min_val, min_indexes, &data[i * ld[order-1]], ranks, ld, order-1);
//
// //if modified -- update
// if (curr_max != *min_val) {
// min_indexes[order-1] = i;
// }
// }
// }
//}
//
//
//
//
//__global__ void GPU_MATHEMATICS::randomize(unsigned * m, unsigned lower_bound, unsigned upper_bound, unsigned sz) {
// for (int i = 0; i < sz; ++i) {
// m[i] = i % 2 == 0 ? 3 : -3 ;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::randomize(unsigned * m, unsigned* ld, unsigned* ranks, unsigned order, unsigned lower_bound, unsigned upper_bound) {
// --order;
// if (order == 0) {
// randomize<<<256,256>>>(m, lower_bound, upper_bound , ranks[order]);
// } else {
// for (unsigned i = 0; i < ranks[order]; ++i) {
// randomize<<<256,256>>>(&m[ld[order] * i], ld, ranks, order, lower_bound, upper_bound);
// }
// }
//}
//
//void GPU_MATHEMATICS::print(const unsigned* ary, unsigned sz) {
// for (unsigned i = 0; i < sz; ++i) {
// std::cout << ary[i] << " " << std::endl;
// }
//}
//
//
//__global__ void GPU_MATHEMATICS::print(const unsigned* ary, const unsigned* dims, const unsigned* lead_dims, unsigned index) {
//
//
//// if (index < 3) {
//// for (unsigned r = 0; r < dims[0]; ++r) {
////
//// if (r != 0)
//// std::cout << std::endl;
////
//// for (unsigned c = 0; c< dims[1]; ++c) {
//// auto str =std::to_string(ary[r + c * lead_dims[index - 1]]);
//// str = str.substr(0, str.length() < 3 ? str.length() : 3);
//// std::cout << str << " ";
//// }
//// }
//// std::cout << "]" << std::endl << std::endl;
////
//// } else {
//// std::cout << "[";
//// for (unsigned i = 0; i < dims[index - 1]; ++i) {
//// print(&ary[i * lead_dims[index - 1]], dims, lead_dims, index - 1);
//// }
//// }
//} | .text
.file "LAR_General_CUDA.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0002d29f_00000000-6_LAR_General_CUDA.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "LAR_General_CUDA.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //=============================================================================================
// Name : thread3dStl.cu
// Author : Jose Refojo
// Version :
// Creation date : 26-02-2014
// Copyright : Copyright belongs to Trinity Centre for High Performance Computing
// Description : This program will initialize a number of arrays stored in stl vectors,
// then it will grab data from each thread (such as thread position inside the block and block),
// save it, send it back into the main memory, and print it
//=============================================================================================
#include <iostream>
#include <stdio.h>
#include <vector>
using namespace std;
#define BLOCK_SIZE 2
__global__ void scanTheadInformationGPU(float *threadXIdsGPU,int N,int M,int L) {
int idx=blockIdx.x*blockDim.x+threadIdx.x;
int idy=blockIdx.y*blockDim.y+threadIdx.y;
int idz=blockIdx.z*blockDim.z+threadIdx.z;
if ( idx < N ) {
if ( idy < M ) {
if ( idz < L ) {
threadXIdsGPU[idx+idy*N+idz*N*M]=-(idx+idy*N+idz*N*M);
//threadXIdsGPU[idz+idy*L+idx*M*L]=-(idz+idy*L+idx*M*L);
}
}
}
}
// Host driver: fills an N*M*L flat array on the host, uploads it, runs the
// kernel (which overwrites every element with its negated linear index),
// downloads the result and prints it twice — once through the flat vector and
// once through a table of row pointers into the same storage.
int main() {
// pointers to host memory matrices
std::vector< float > vector1d;
std::vector< std::vector< float* > > vector3d;
float *vector1d_gpu;
// pointers to device memory matrices
//float *vectorGPU;
// N,M and L are the sizes on each dimension
int N=2,M=3,L=4,totalSize;
unsigned int ui,uj,uk;
totalSize=N*M*L;
// Allocate arrays threadIds and blockIds on host
vector1d.resize(totalSize);
vector3d.resize(N, std::vector< float* > (M));
// vector3d[z][y] points at the start of row (z,y) inside vector1d.
for (ui=0;ui<N;ui++) {
for (uj=0;uj<M;uj++) {
vector3d[ui][uj]=&(vector1d[uj*L+ui*M*L]);
}
}
// Initialise each element with its own linear index (x fastest-varying).
for (ui=0;ui<N;ui++) {
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
//vector1d[ui+uj*N+uk*N*M]=ui+uj*N+uk*N*M;
vector1d[uk+uj*L+ui*M*L]=uk+uj*L+ui*M*L;
}
}
}
// Allocate arrays threadIdsGPU and blockIdsGPU on device
// NOTE(review): return codes of the CUDA calls below are never checked.
cudaMalloc ((void **) &vector1d_gpu, sizeof(float)*N*M*L);
// Copy data from host memory to device memory
cudaMemcpy(vector1d_gpu, &(vector1d[0]), sizeof(float)*N*M*L, cudaMemcpyHostToDevice);
int block_size=BLOCK_SIZE;
dim3 dimBlock(block_size,block_size,block_size);
// One extra block per dimension when the size does not divide evenly.
// NOTE(review): the z component uses dimBlock.y where dimBlock.z is surely
// intended; harmless only because all three block dimensions equal BLOCK_SIZE.
dim3 dimGrid ( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1),(M/dimBlock.y) + (!(M%dimBlock.y)?0:1),(L/dimBlock.y) + (!(L%dimBlock.y)?0:1));
// Call the kernel
scanTheadInformationGPU <<<dimGrid,dimBlock>>> (vector1d_gpu,N,M,L);
// Copy data from device memory to host memory
// NOTE(review): vector1d_gpu is never cudaFree'd — device memory leak.
cudaMemcpy(&(vector1d[0]), vector1d_gpu, sizeof(float)*N*M*L, cudaMemcpyDeviceToHost);
// Print all the data about the threads, slice by slice, via the flat vector...
cout << "vector1d_host: (" << N << "," << M << "," << L << ")" << endl;
for (ui=0;ui<N;ui++) {
cout << "vector1d_host, slice in Z " << ui << ":" << endl;
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
cout << vector1d[uk+uj*L+ui*M*L];
if (uk==L-1) { cout << endl;} else { cout << "\t"; }
}
}
}
// ...and via the pointer table, which aliases the same storage.
cout << "vector3d_host: (" << N << "," << M << "," << L << ")" << endl;
for (ui=0;ui<N;ui++) {
cout << "vector3d_host, slice in Z " << ui << ":" << endl;
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
cout << vector3d[ui][uj][uk];
if (uk==L-1) { cout << endl;} else { cout << "\t"; }
}
}
}
/*
cout << "vector1d_host:" <<endl;
for (ui=0; ui<numRows; ui++) {
for (uj=0; uj<numColumns; uj++) {
cout << input2d_host[ui][uj];
if (uj==numColumns-1) { cout << endl;} else { cout << "\t"; }
}
}
cout << endl;
*/
} | code for sm_80
Function : _Z23scanTheadInformationGPUPfiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002600 */
/*0020*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e680000002100 */
/*0050*/ S2R R4, SR_CTAID.Z ; /* 0x0000000000047919 */
/* 0x000ea80000002700 */
/*0060*/ S2R R7, SR_TID.Z ; /* 0x0000000000077919 */
/* 0x000ea20000002300 */
/*0070*/ IMAD R2, R2, c[0x0][0x4], R5 ; /* 0x0000010002027a24 */
/* 0x001fca00078e0205 */
/*0080*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x16c], PT ; /* 0x00005b0002007a0c */
/* 0x000fe20003f06270 */
/*0090*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0203 */
/*00a0*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x168], P0 ; /* 0x00005a0000007a0c */
/* 0x000fe20000706670 */
/*00b0*/ IMAD R3, R4, c[0x0][0x8], R7 ; /* 0x0000020004037a24 */
/* 0x004fca00078e0207 */
/*00c0*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x170], P0 ; /* 0x00005c0003007a0c */
/* 0x000fda0000706670 */
/*00d0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00e0*/ IMAD R3, R3, c[0x0][0x16c], R2 ; /* 0x00005b0003037a24 */
/* 0x000fe200078e0202 */
/*00f0*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*0100*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0110*/ IMAD R3, R3, c[0x0][0x168], R0 ; /* 0x00005a0003037a24 */
/* 0x000fca00078e0200 */
/*0120*/ IADD3 R5, -R3.reuse, RZ, RZ ; /* 0x000000ff03057210 */
/* 0x040fe40007ffe1ff */
/*0130*/ IMAD.WIDE R2, R3, R2, c[0x0][0x160] ; /* 0x0000580003027625 */
/* 0x000fc800078e0202 */
/*0140*/ I2F R5, R5 ; /* 0x0000000500057306 */
/* 0x000e240000201400 */
/*0150*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*0160*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0170*/ BRA 0x170; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //=============================================================================================
// Name : thread3dStl.cu
// Author : Jose Refojo
// Version :
// Creation date : 26-02-2014
// Copyright : Copyright belongs to Trinity Centre for High Performance Computing
// Description : This program will initialize a number of arrays stored in stl vectors,
// then it will grab data from each thread (such as thread position inside the block and block),
// save it, send it back into the main memory, and print it
//=============================================================================================
#include <iostream>
#include <stdio.h>
#include <vector>
using namespace std;
#define BLOCK_SIZE 2
// Device kernel: every thread inside the N x M x L volume stores the negation
// of its own linear index (x fastest-varying) into threadXIdsGPU.
__global__ void scanTheadInformationGPU(float *threadXIdsGPU,int N,int M,int L) {
// Global 3-D coordinates of this thread within the whole grid.
int idx=blockIdx.x*blockDim.x+threadIdx.x;
int idy=blockIdx.y*blockDim.y+threadIdx.y;
int idz=blockIdx.z*blockDim.z+threadIdx.z;
// Bounds checks: the grid may be padded beyond the problem volume.
if ( idx < N ) {
if ( idy < M ) {
if ( idz < L ) {
// Linear index idx + idy*N + idz*N*M; the negated int is implicitly
// converted to float on store.
threadXIdsGPU[idx+idy*N+idz*N*M]=-(idx+idy*N+idz*N*M);
//threadXIdsGPU[idz+idy*L+idx*M*L]=-(idz+idy*L+idx*M*L);
}
}
}
}
// Host driver: fills an N*M*L flat array on the host, uploads it, runs the
// kernel (which overwrites every element with its negated linear index),
// downloads the result and prints it twice — once through the flat vector and
// once through a table of row pointers aliasing the same storage.
int main() {
// Host-side storage: one flat vector plus a 2-D table of pointers into it.
std::vector< float > vector1d;
std::vector< std::vector< float* > > vector3d;
float *vector1d_gpu;   // device buffer
// N, M and L are the sizes on each dimension.
int N=2,M=3,L=4,totalSize;
unsigned int ui,uj,uk;
totalSize=N*M*L;
// Allocate the host arrays.
vector1d.resize(totalSize);
vector3d.resize(N, std::vector< float* > (M));
// vector3d[z][y] points at the start of row (z,y) inside vector1d.
for (ui=0;ui<N;ui++) {
for (uj=0;uj<M;uj++) {
vector3d[ui][uj]=&(vector1d[uj*L+ui*M*L]);
}
}
// Initialise each element with its own linear index (x fastest-varying).
for (ui=0;ui<N;ui++) {
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
vector1d[uk+uj*L+ui*M*L]=uk+uj*L+ui*M*L;
}
}
}
// Allocate the device buffer and upload the host data.
cudaMalloc ((void **) &vector1d_gpu, sizeof(float)*totalSize);
cudaMemcpy(vector1d_gpu, &(vector1d[0]), sizeof(float)*totalSize, cudaMemcpyHostToDevice);
int block_size=BLOCK_SIZE;
dim3 dimBlock(block_size,block_size,block_size);
// One extra block per dimension when the size does not divide evenly.
// BUG FIX: the z component previously divided by dimBlock.y; it must use
// dimBlock.z (only accidentally correct while all block dimensions are equal).
dim3 dimGrid ( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1),
               (M/dimBlock.y) + (!(M%dimBlock.y)?0:1),
               (L/dimBlock.z) + (!(L%dimBlock.z)?0:1));
// Call the kernel.
scanTheadInformationGPU <<<dimGrid,dimBlock>>> (vector1d_gpu,N,M,L);
// Copy the results back, then release the device buffer (previously leaked).
cudaMemcpy(&(vector1d[0]), vector1d_gpu, sizeof(float)*totalSize, cudaMemcpyDeviceToHost);
cudaFree(vector1d_gpu);
// Print the result slice by slice, via the flat vector...
cout << "vector1d_host: (" << N << "," << M << "," << L << ")" << endl;
for (ui=0;ui<N;ui++) {
cout << "vector1d_host, slice in Z " << ui << ":" << endl;
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
cout << vector1d[uk+uj*L+ui*M*L];
if (uk==L-1) { cout << endl;} else { cout << "\t"; }
}
}
}
// ...and via the pointer table, which must show the same values.
cout << "vector3d_host: (" << N << "," << M << "," << L << ")" << endl;
for (ui=0;ui<N;ui++) {
cout << "vector3d_host, slice in Z " << ui << ":" << endl;
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
cout << vector3d[ui][uj][uk];
if (uk==L-1) { cout << endl;} else { cout << "\t"; }
}
}
}
} | .file "tmpxft_00011acb_00000000-6_thread3dStl.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
# _ZL26__cudaUnregisterBinaryUtilv: atexit hook installed by the CUDA
# registration stub; unregisters the embedded fat binary on process exit.
# Clobbers: rdi, volatile regs of the callee. Sub/add of 8 keeps rsp
# 16-byte aligned at the call site (entry rsp % 16 == 8).
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi   # arg0 = saved fat-binary handle
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4055:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z46__device_stub__Z23scanTheadInformationGPUPfiiiPfiii
.type _Z46__device_stub__Z23scanTheadInformationGPUPfiiiPfiii, @function
_Z46__device_stub__Z23scanTheadInformationGPUPfiiiPfiii:
.LFB4077:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movl %ecx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 12(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z23scanTheadInformationGPUPfiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4077:
.size _Z46__device_stub__Z23scanTheadInformationGPUPfiiiPfiii, .-_Z46__device_stub__Z23scanTheadInformationGPUPfiiiPfiii
# _Z23scanTheadInformationGPUPfiii: host-side trampoline bearing the kernel's
# mangled name; simply tail-delegates (args already in rdi/esi/edx/ecx) to the
# device stub, which pops the launch configuration and calls cudaLaunchKernel.
.globl _Z23scanTheadInformationGPUPfiii
.type _Z23scanTheadInformationGPUPfiii, @function
_Z23scanTheadInformationGPUPfiii:
.LFB4078:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z23scanTheadInformationGPUPfiiiPfiii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4078:
.size _Z23scanTheadInformationGPUPfiii, .-_Z23scanTheadInformationGPUPfiii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z23scanTheadInformationGPUPfiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4080:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z23scanTheadInformationGPUPfiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4080:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .text._ZNSt6vectorIfSaIfEED2Ev,"axG",@progbits,_ZNSt6vectorIfSaIfEED5Ev,comdat
.align 2
.weak _ZNSt6vectorIfSaIfEED2Ev
.type _ZNSt6vectorIfSaIfEED2Ev, @function
_ZNSt6vectorIfSaIfEED2Ev:
.LFB4390:
.cfi_startproc
endbr64
movq (%rdi), %rax
testq %rax, %rax
je .L16
subq $8, %rsp
.cfi_def_cfa_offset 16
movq 16(%rdi), %rsi
subq %rax, %rsi
movq %rax, %rdi
call _ZdlPvm@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.L16:
ret
.cfi_endproc
.LFE4390:
.size _ZNSt6vectorIfSaIfEED2Ev, .-_ZNSt6vectorIfSaIfEED2Ev
.weak _ZNSt6vectorIfSaIfEED1Ev
.set _ZNSt6vectorIfSaIfEED1Ev,_ZNSt6vectorIfSaIfEED2Ev
.section .text._ZNSt6vectorIS_IPfSaIS0_EESaIS2_EED2Ev,"axG",@progbits,_ZNSt6vectorIS_IPfSaIS0_EESaIS2_EED5Ev,comdat
.align 2
.weak _ZNSt6vectorIS_IPfSaIS0_EESaIS2_EED2Ev
.type _ZNSt6vectorIS_IPfSaIS0_EESaIS2_EED2Ev, @function
_ZNSt6vectorIS_IPfSaIS0_EESaIS2_EED2Ev:
.LFB4402:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdi, %r12
movq 8(%rdi), %rbp
movq (%rdi), %rbx
cmpq %rbx, %rbp
jne .L22
.L20:
movq (%r12), %rdi
testq %rdi, %rdi
je .L19
movq 16(%r12), %rsi
subq %rdi, %rsi
call _ZdlPvm@PLT
.L19:
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
addq $24, %rbx
cmpq %rbx, %rbp
je .L20
.L22:
movq (%rbx), %rdi
testq %rdi, %rdi
je .L21
movq 16(%rbx), %rsi
subq %rdi, %rsi
call _ZdlPvm@PLT
jmp .L21
.cfi_endproc
.LFE4402:
.size _ZNSt6vectorIS_IPfSaIS0_EESaIS2_EED2Ev, .-_ZNSt6vectorIS_IPfSaIS0_EESaIS2_EED2Ev
.weak _ZNSt6vectorIS_IPfSaIS0_EESaIS2_EED1Ev
.set _ZNSt6vectorIS_IPfSaIS0_EESaIS2_EED1Ev,_ZNSt6vectorIS_IPfSaIS0_EESaIS2_EED2Ev
.section .text._ZNSt6vectorIPfSaIS0_EED2Ev,"axG",@progbits,_ZNSt6vectorIPfSaIS0_EED5Ev,comdat
.align 2
.weak _ZNSt6vectorIPfSaIS0_EED2Ev
.type _ZNSt6vectorIPfSaIS0_EED2Ev, @function
_ZNSt6vectorIPfSaIS0_EED2Ev:
.LFB4415:
.cfi_startproc
endbr64
movq (%rdi), %rax
testq %rax, %rax
je .L29
subq $8, %rsp
.cfi_def_cfa_offset 16
movq 16(%rdi), %rsi
subq %rax, %rsi
movq %rax, %rdi
call _ZdlPvm@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.L29:
ret
.cfi_endproc
.LFE4415:
.size _ZNSt6vectorIPfSaIS0_EED2Ev, .-_ZNSt6vectorIPfSaIS0_EED2Ev
.weak _ZNSt6vectorIPfSaIS0_EED1Ev
.set _ZNSt6vectorIPfSaIS0_EED1Ev,_ZNSt6vectorIPfSaIS0_EED2Ev
.section .text._ZNSt6vectorIPfSaIS0_EEaSERKS2_,"axG",@progbits,_ZNSt6vectorIPfSaIS0_EEaSERKS2_,comdat
.align 2
.weak _ZNSt6vectorIPfSaIS0_EEaSERKS2_
.type _ZNSt6vectorIPfSaIS0_EEaSERKS2_, @function
_ZNSt6vectorIPfSaIS0_EEaSERKS2_:
.LFB4935:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %rbx
cmpq %rdi, %rsi
je .L33
movq %rsi, %r12
movq (%rsi), %r13
movq 8(%rsi), %rbp
subq %r13, %rbp
movq (%rdi), %rdi
movq 16(%rbx), %rax
subq %rdi, %rax
cmpq %rbp, %rax
jb .L46
movq 8(%rbx), %rdx
subq %rdi, %rdx
cmpq %rbp, %rdx
jb .L40
cmpq $8, %rbp
jle .L41
movq %rbp, %rdx
movq %r13, %rsi
call memmove@PLT
jmp .L39
.L46:
movabsq $9223372036854775800, %rax
cmpq %rbp, %rax
jb .L47
movq %rbp, %rdi
call _Znwm@PLT
movq %rax, %r12
cmpq $8, %rbp
jle .L36
movq %rbp, %rdx
movq %r13, %rsi
movq %rax, %rdi
call memmove@PLT
.L37:
movq (%rbx), %rdi
testq %rdi, %rdi
je .L38
movq 16(%rbx), %rsi
subq %rdi, %rsi
call _ZdlPvm@PLT
.L38:
movq %r12, (%rbx)
addq %rbp, %r12
movq %r12, 16(%rbx)
.L39:
addq (%rbx), %rbp
movq %rbp, 8(%rbx)
.L33:
movq %rbx, %rax
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L47:
.cfi_restore_state
call _ZSt28__throw_bad_array_new_lengthv@PLT
.L36:
movq 0(%r13), %rax
movq %rax, (%r12)
jmp .L37
.L41:
jne .L39
movq 0(%r13), %rax
movq %rax, (%rdi)
jmp .L39
.L40:
cmpq $8, %rdx
jle .L42
movq %r13, %rsi
call memmove@PLT
.L43:
movq 8(%rbx), %rdi
movq %rdi, %rsi
subq (%rbx), %rsi
addq (%r12), %rsi
movq 8(%r12), %rdx
subq %rsi, %rdx
cmpq $8, %rdx
jle .L44
call memmove@PLT
jmp .L39
.L42:
jne .L43
movq 0(%r13), %rax
movq %rax, (%rdi)
jmp .L43
.L44:
jne .L39
movq (%rsi), %rax
movq %rax, (%rdi)
jmp .L39
.cfi_endproc
.LFE4935:
.size _ZNSt6vectorIPfSaIS0_EEaSERKS2_, .-_ZNSt6vectorIPfSaIS0_EEaSERKS2_
.section .text._ZSt18__do_uninit_fill_nIPSt6vectorIPfSaIS1_EEmS3_ET_S5_T0_RKT1_,"axG",@progbits,_ZSt18__do_uninit_fill_nIPSt6vectorIPfSaIS1_EEmS3_ET_S5_T0_RKT1_,comdat
.weak _ZSt18__do_uninit_fill_nIPSt6vectorIPfSaIS1_EEmS3_ET_S5_T0_RKT1_
.type _ZSt18__do_uninit_fill_nIPSt6vectorIPfSaIS1_EEmS3_ET_S5_T0_RKT1_, @function
_ZSt18__do_uninit_fill_nIPSt6vectorIPfSaIS1_EEmS3_ET_S5_T0_RKT1_:
.LFB4937:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA4937
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 8(%rsp)
testq %rsi, %rsi
je .L59
movq %rsi, %r14
movq %rdx, %r13
movq %rdi, %rbx
movabsq $9223372036854775800, %r15
jmp .L54
.L65:
.LEHB0:
call _ZSt28__throw_bad_array_new_lengthv@PLT
.LEHE0:
.L61:
endbr64
movq %rax, %rdi
call __cxa_begin_catch@PLT
.L56:
cmpq %rbx, 8(%rsp)
jne .L57
.LEHB1:
call __cxa_rethrow@PLT
.LEHE1:
.L62:
endbr64
movq %rax, %rbx
call __cxa_end_catch@PLT
movq %rbx, %rdi
.LEHB2:
call _Unwind_Resume@PLT
.LEHE2:
.L66:
movq %rax, %rbp
.L50:
movq %rbp, (%rbx)
movq %rbp, 8(%rbx)
addq %rbp, %r12
movq %r12, 16(%rbx)
movq 0(%r13), %rsi
movq 8(%r13), %r12
subq %rsi, %r12
cmpq $8, %r12
jle .L52
movq %r12, %rdx
movq %rbp, %rdi
call memmove@PLT
.L53:
addq %r12, %rbp
movq %rbp, 8(%rbx)
addq $24, %rbx
subq $1, %r14
je .L48
.L54:
movq 8(%r13), %r12
subq 0(%r13), %r12
movq $0, (%rbx)
movq $0, 8(%rbx)
movq $0, 16(%rbx)
je .L60
cmpq %r12, %r15
jb .L65
movq %r12, %rdi
.LEHB3:
call _Znwm@PLT
.LEHE3:
jmp .L66
.L60:
movl $0, %ebp
jmp .L50
.L52:
jne .L53
movq (%rsi), %rax
movq %rax, 0(%rbp)
jmp .L53
.L59:
movq 8(%rsp), %rbx
.L48:
movq %rbx, %rax
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L57:
.cfi_restore_state
movq 8(%rsp), %r15
movq %r15, %rdi
call _ZNSt6vectorIPfSaIS0_EED1Ev
movq %r15, %rax
addq $24, %rax
movq %rax, 8(%rsp)
jmp .L56
.cfi_endproc
.LFE4937:
.globl __gxx_personality_v0
.section .gcc_except_table._ZSt18__do_uninit_fill_nIPSt6vectorIPfSaIS1_EEmS3_ET_S5_T0_RKT1_,"aG",@progbits,_ZSt18__do_uninit_fill_nIPSt6vectorIPfSaIS1_EEmS3_ET_S5_T0_RKT1_,comdat
.align 4
.LLSDA4937:
.byte 0xff
.byte 0x9b
.uleb128 .LLSDATT4937-.LLSDATTD4937
.LLSDATTD4937:
.byte 0x1
.uleb128 .LLSDACSE4937-.LLSDACSB4937
.LLSDACSB4937:
.uleb128 .LEHB0-.LFB4937
.uleb128 .LEHE0-.LEHB0
.uleb128 .L61-.LFB4937
.uleb128 0x1
.uleb128 .LEHB1-.LFB4937
.uleb128 .LEHE1-.LEHB1
.uleb128 .L62-.LFB4937
.uleb128 0
.uleb128 .LEHB2-.LFB4937
.uleb128 .LEHE2-.LEHB2
.uleb128 0
.uleb128 0
.uleb128 .LEHB3-.LFB4937
.uleb128 .LEHE3-.LEHB3
.uleb128 .L61-.LFB4937
.uleb128 0x1
.LLSDACSE4937:
.byte 0x1
.byte 0
.align 4
.long 0
.LLSDATT4937:
.section .text._ZSt18__do_uninit_fill_nIPSt6vectorIPfSaIS1_EEmS3_ET_S5_T0_RKT1_,"axG",@progbits,_ZSt18__do_uninit_fill_nIPSt6vectorIPfSaIS1_EEmS3_ET_S5_T0_RKT1_,comdat
.size _ZSt18__do_uninit_fill_nIPSt6vectorIPfSaIS1_EEmS3_ET_S5_T0_RKT1_, .-_ZSt18__do_uninit_fill_nIPSt6vectorIPfSaIS1_EEmS3_ET_S5_T0_RKT1_
.section .text._ZSt16__do_uninit_copyISt13move_iteratorIPSt6vectorIPfSaIS2_EEES5_ET0_T_S8_S7_,"axG",@progbits,_ZSt16__do_uninit_copyISt13move_iteratorIPSt6vectorIPfSaIS2_EEES5_ET0_T_S8_S7_,comdat
.weak _ZSt16__do_uninit_copyISt13move_iteratorIPSt6vectorIPfSaIS2_EEES5_ET0_T_S8_S7_
.type _ZSt16__do_uninit_copyISt13move_iteratorIPSt6vectorIPfSaIS2_EEES5_ET0_T_S8_S7_, @function
_ZSt16__do_uninit_copyISt13move_iteratorIPSt6vectorIPfSaIS2_EEES5_ET0_T_S8_S7_:
.LFB4953:
.cfi_startproc
endbr64
movq %rdx, %rax
cmpq %rsi, %rdi
je .L67
movq %rdi, %rdx
movq %rax, %rcx
.L69:
movq (%rdx), %r8
movq %r8, (%rcx)
movq 8(%rdx), %r8
movq %r8, 8(%rcx)
movq 16(%rdx), %r8
movq %r8, 16(%rcx)
movq $0, 16(%rdx)
movq $0, 8(%rdx)
movq $0, (%rdx)
addq $24, %rcx
addq $24, %rdx
cmpq %rdx, %rsi
jne .L69
leaq -24(%rsi), %rdx
subq %rdi, %rdx
shrq $3, %rdx
movabsq $768614336404564651, %rcx
imulq %rcx, %rdx
movabsq $2305843009213693951, %rcx
andq %rcx, %rdx
leaq 3(%rdx,%rdx,2), %rdx
leaq (%rax,%rdx,8), %rax
.L67:
ret
.cfi_endproc
.LFE4953:
.size _ZSt16__do_uninit_copyISt13move_iteratorIPSt6vectorIPfSaIS2_EEES5_ET0_T_S8_S7_, .-_ZSt16__do_uninit_copyISt13move_iteratorIPSt6vectorIPfSaIS2_EEES5_ET0_T_S8_S7_
.section .rodata._ZNSt6vectorIS_IPfSaIS0_EESaIS2_EE14_M_fill_insertEN9__gnu_cxx17__normal_iteratorIPS2_S4_EEmRKS2_.str1.1,"aMS",@progbits,1
.LC1:
.string "vector::_M_fill_insert"
.section .text._ZNSt6vectorIS_IPfSaIS0_EESaIS2_EE14_M_fill_insertEN9__gnu_cxx17__normal_iteratorIPS2_S4_EEmRKS2_,"axG",@progbits,_ZNSt6vectorIS_IPfSaIS0_EESaIS2_EE14_M_fill_insertEN9__gnu_cxx17__normal_iteratorIPS2_S4_EEmRKS2_,comdat
.align 2
.weak _ZNSt6vectorIS_IPfSaIS0_EESaIS2_EE14_M_fill_insertEN9__gnu_cxx17__normal_iteratorIPS2_S4_EEmRKS2_
.type _ZNSt6vectorIS_IPfSaIS0_EESaIS2_EE14_M_fill_insertEN9__gnu_cxx17__normal_iteratorIPS2_S4_EEmRKS2_, @function
_ZNSt6vectorIS_IPfSaIS0_EESaIS2_EE14_M_fill_insertEN9__gnu_cxx17__normal_iteratorIPS2_S4_EEmRKS2_:
.LFB4617:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA4617
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
testq %rdx, %rdx
je .L72
movq %rdi, %rbp
movq %rsi, %r12
movq %rdx, %rbx
movq %rcx, %r13
movq %rsi, %r15
movq 8(%rdi), %r14
movq 16(%rdi), %rax
subq %r14, %rax
sarq $3, %rax
movabsq $-6148914691236517205, %rdx
imulq %rdx, %rax
cmpq %rbx, %rax
jb .L74
movq %rdi, 32(%rsp)
movq 8(%rcx), %rax
subq (%rcx), %rax
movq %rax, %rcx
movq %rax, (%rsp)
je .L108
movabsq $9223372036854775800, %rax
cmpq %rcx, %rax
jb .L118
movq (%rsp), %rdi
.LEHB4:
call _Znwm@PLT
movq %rax, %r14
.L75:
movq %r14, 40(%rsp)
movq (%rsp), %rax
addq %r14, %rax
movq %rax, 56(%rsp)
movq 0(%r13), %rsi
movq 8(%r13), %rax
subq %rsi, %rax
movq %rax, %r13
cmpq $8, %rax
jle .L78
movq %rax, %rdx
movq %r14, %rdi
call memmove@PLT
.L79:
addq %r14, %r13
movq %r13, 48(%rsp)
movq 8(%rbp), %r13
movq %r13, %rax
subq %r12, %rax
movq %rax, 8(%rsp)
sarq $3, %rax
movabsq $-6148914691236517205, %rdx
imulq %rdx, %rax
cmpq %rax, %rbx
jnb .L80
leaq (%rbx,%rbx,2), %rax
salq $3, %rax
movq %r13, %rbx
movq %rax, 8(%rsp)
subq %rax, %rbx
movq %r13, %rdx
movq %r13, %rsi
movq %rbx, %rdi
call _ZSt16__do_uninit_copyISt13move_iteratorIPSt6vectorIPfSaIS2_EEES5_ET0_T_S8_S7_
movq 8(%rsp), %rax
addq %rax, 8(%rbp)
movq %rbx, %rdx
subq %r12, %rdx
movq %rdx, %rax
sarq $3, %rax
movabsq $-6148914691236517205, %rcx
imulq %rcx, %rax
testq %rdx, %rdx
jle .L81
leaq 0(,%rax,4), %rdx
subq %rdx, %rax
leaq 0(%r13,%rax,8), %rbp
jmp .L83
.L118:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L119
call _ZSt28__throw_bad_array_new_lengthv@PLT
.LEHE4:
.L119:
call __stack_chk_fail@PLT
.L108:
movl $0, %r14d
jmp .L75
.L78:
jne .L79
movq (%rsi), %rax
movq %rax, (%r14)
jmp .L79
.L82:
cmpq %r13, %rbp
je .L81
.L83:
subq $24, %rbx
subq $24, %r13
movq 0(%r13), %rdi
movq 16(%r13), %rsi
movq (%rbx), %rax
movq %rax, 0(%r13)
movq 8(%rbx), %rax
movq %rax, 8(%r13)
movq 16(%rbx), %rax
movq %rax, 16(%r13)
movq $0, (%rbx)
movq $0, 8(%rbx)
movq $0, 16(%rbx)
testq %rdi, %rdi
je .L82
subq %rdi, %rsi
call _ZdlPvm@PLT
jmp .L82
.L81:
movq 8(%rsp), %rbx
addq %r12, %rbx
cmpq %r12, %rbx
je .L84
leaq 40(%rsp), %rbp
jmp .L85
.L120:
addq $24, %r15
cmpq %r15, %rbx
je .L84
.L85:
movq %rbp, %rsi
movq %r15, %rdi
.LEHB5:
call _ZNSt6vectorIPfSaIS0_EEaSERKS2_
jmp .L120
.L80:
leaq 40(%rsp), %rdx
movq %rbx, %rsi
subq %rax, %rsi
movq %r13, %rdi
call _ZSt18__do_uninit_fill_nIPSt6vectorIPfSaIS1_EEmS3_ET_S5_T0_RKT1_
movq %rax, %rdx
movq %rax, 8(%rbp)
movq %r13, %rsi
movq %r12, %rdi
call _ZSt16__do_uninit_copyISt13move_iteratorIPSt6vectorIPfSaIS2_EEES5_ET0_T_S8_S7_
movq 8(%rsp), %rax
addq %rax, 8(%rbp)
cmpq %r13, %r12
je .L84
leaq 40(%rsp), %rbx
jmp .L86
.L121:
addq $24, %r15
cmpq %r15, %r13
je .L84
.L86:
movq %rbx, %rsi
movq %r15, %rdi
call _ZNSt6vectorIPfSaIS0_EEaSERKS2_
.LEHE5:
jmp .L121
.L84:
testq %r14, %r14
je .L72
movq (%rsp), %rsi
movq %r14, %rdi
call _ZdlPvm@PLT
jmp .L72
.L74:
movq (%rdi), %r15
movq %r14, %rax
subq %r15, %rax
sarq $3, %rax
movabsq $-6148914691236517205, %rdx
imulq %rdx, %rax
movabsq $384307168202282325, %rdx
subq %rax, %rdx
cmpq %rbx, %rdx
jb .L122
cmpq %rax, %rbx
movq %rax, %rdx
cmovnb %rbx, %rdx
addq %rax, %rdx
jc .L90
movabsq $384307168202282325, %rax
cmpq %rax, %rdx
cmovbe %rdx, %rax
movq %rax, 8(%rsp)
movq %rsi, %rax
subq %r15, %rax
movq %rax, 16(%rsp)
movq $0, (%rsp)
testq %rdx, %rdx
je .L91
jmp .L106
.L122:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L123
leaq .LC1(%rip), %rdi
.LEHB6:
call _ZSt20__throw_length_errorPKc@PLT
.L123:
call __stack_chk_fail@PLT
.L125:
movq (%rsp), %rdx
movq %r12, %rsi
movq %r15, %rdi
call _ZSt16__do_uninit_copyISt13move_iteratorIPSt6vectorIPfSaIS2_EEES5_ET0_T_S8_S7_
leaq (%rbx,%rbx,2), %rdx
leaq (%rax,%rdx,8), %rdx
movq %r14, %rsi
movq %r12, %rdi
call _ZSt16__do_uninit_copyISt13move_iteratorIPSt6vectorIPfSaIS2_EEES5_ET0_T_S8_S7_
movq %rax, %r12
cmpq %r15, %r14
je .L92
movq %r15, %rbx
jmp .L94
.L93:
addq $24, %rbx
cmpq %rbx, %r14
je .L92
.L94:
movq (%rbx), %rdi
testq %rdi, %rdi
je .L93
movq 16(%rbx), %rsi
subq %rdi, %rsi
call _ZdlPvm@PLT
jmp .L93
.L92:
testq %r15, %r15
je .L95
movq 16(%rbp), %rsi
subq %r15, %rsi
movq %r15, %rdi
call _ZdlPvm@PLT
.L95:
movq (%rsp), %rcx
movq %rcx, 0(%rbp)
movq %r12, 8(%rbp)
movq 8(%rsp), %rax
leaq (%rax,%rax,2), %rax
leaq (%rcx,%rax,8), %rax
movq %rax, 16(%rbp)
.L72:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L124
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L110:
.cfi_restore_state
endbr64
movq %rax, %rbx
leaq 40(%rsp), %rdi
call _ZNSt6vectorIPfSaIS0_EED1Ev
movq 72(%rsp), %rax
subq %fs:40, %rax
je .L97
call __stack_chk_fail@PLT
.L97:
movq %rbx, %rdi
call _Unwind_Resume@PLT
.LEHE6:
.L111:
endbr64
movq %rax, %rdi
call __cxa_begin_catch@PLT
cmpq $0, (%rsp)
jne .L99
movq 16(%rsp), %rax
sarq $3, %rax
movabsq $-6148914691236517205, %rdx
imulq %rdx, %rax
addq %rax, %rbx
imulq $24, %rbx, %rbx
.L100:
movq 24(%rsp), %rax
cmpq %rax, %rbx
je .L102
movq 24(%rsp), %r14
movq %r14, %rdi
call _ZNSt6vectorIPfSaIS0_EED1Ev
movq %r14, %rax
addq $24, %rax
movq %rax, 24(%rsp)
jmp .L100
.L99:
imulq $24, 8(%rsp), %rsi
movq (%rsp), %rdi
call _ZdlPvm@PLT
.L102:
movq 72(%rsp), %rax
subq %fs:40, %rax
je .L103
call __stack_chk_fail@PLT
.L103:
.LEHB7:
call __cxa_rethrow@PLT
.LEHE7:
.L112:
endbr64
movq %rax, %rbx
call __cxa_end_catch@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
je .L105
call __stack_chk_fail@PLT
.L105:
movq %rbx, %rdi
.LEHB8:
call _Unwind_Resume@PLT
.L90:
movq %rsi, %rax
subq %r15, %rax
movq %rax, 16(%rsp)
movabsq $384307168202282325, %rax
movq %rax, 8(%rsp)
.L106:
movq 8(%rsp), %rax
leaq (%rax,%rax,2), %rdi
salq $3, %rdi
call _Znwm@PLT
.LEHE8:
movq %rax, (%rsp)
.L91:
movq (%rsp), %rax
movq 16(%rsp), %rcx
addq %rcx, %rax
movq %rax, 24(%rsp)
movq %r13, %rdx
movq %rbx, %rsi
movq %rax, %rdi
.LEHB9:
call _ZSt18__do_uninit_fill_nIPSt6vectorIPfSaIS1_EEmS3_ET_S5_T0_RKT1_
.LEHE9:
jmp .L125
.L124:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4617:
.section .gcc_except_table._ZNSt6vectorIS_IPfSaIS0_EESaIS2_EE14_M_fill_insertEN9__gnu_cxx17__normal_iteratorIPS2_S4_EEmRKS2_,"aG",@progbits,_ZNSt6vectorIS_IPfSaIS0_EESaIS2_EE14_M_fill_insertEN9__gnu_cxx17__normal_iteratorIPS2_S4_EEmRKS2_,comdat
.align 4
.LLSDA4617:
.byte 0xff
.byte 0x9b
.uleb128 .LLSDATT4617-.LLSDATTD4617
.LLSDATTD4617:
.byte 0x1
.uleb128 .LLSDACSE4617-.LLSDACSB4617
.LLSDACSB4617:
.uleb128 .LEHB4-.LFB4617
.uleb128 .LEHE4-.LEHB4
.uleb128 0
.uleb128 0
.uleb128 .LEHB5-.LFB4617
.uleb128 .LEHE5-.LEHB5
.uleb128 .L110-.LFB4617
.uleb128 0
.uleb128 .LEHB6-.LFB4617
.uleb128 .LEHE6-.LEHB6
.uleb128 0
.uleb128 0
.uleb128 .LEHB7-.LFB4617
.uleb128 .LEHE7-.LEHB7
.uleb128 .L112-.LFB4617
.uleb128 0
.uleb128 .LEHB8-.LFB4617
.uleb128 .LEHE8-.LEHB8
.uleb128 0
.uleb128 0
.uleb128 .LEHB9-.LFB4617
.uleb128 .LEHE9-.LEHB9
.uleb128 .L111-.LFB4617
.uleb128 0x1
.LLSDACSE4617:
.byte 0x1
.byte 0
.align 4
.long 0
.LLSDATT4617:
.section .text._ZNSt6vectorIS_IPfSaIS0_EESaIS2_EE14_M_fill_insertEN9__gnu_cxx17__normal_iteratorIPS2_S4_EEmRKS2_,"axG",@progbits,_ZNSt6vectorIS_IPfSaIS0_EESaIS2_EE14_M_fill_insertEN9__gnu_cxx17__normal_iteratorIPS2_S4_EEmRKS2_,comdat
.size _ZNSt6vectorIS_IPfSaIS0_EESaIS2_EE14_M_fill_insertEN9__gnu_cxx17__normal_iteratorIPS2_S4_EEmRKS2_, .-_ZNSt6vectorIS_IPfSaIS0_EESaIS2_EE14_M_fill_insertEN9__gnu_cxx17__normal_iteratorIPS2_S4_EEmRKS2_
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "vector1d_host: ("
.LC4:
.string ","
.LC5:
.string ")"
.LC6:
.string "vector1d_host, slice in Z "
.LC7:
.string ":"
.LC8:
.string "\t"
.LC9:
.string "vector3d_host: ("
.LC10:
.string "vector3d_host, slice in Z "
.text
.globl main
.type main, @function
main:
.LFB4032:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA4032
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $152, %rsp
.cfi_def_cfa_offset 208
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
movq $0, 48(%rsp)
movq $0, 56(%rsp)
movq $0, 64(%rsp)
movq $0, 80(%rsp)
movq $0, 88(%rsp)
movq $0, 96(%rsp)
movl $96, %edi
.LEHB10:
call _Znwm@PLT
movq %rax, %rbx
movl $0x00000000, (%rax)
movl $0, %eax
.L127:
addq $4, %rax
cmpq $96, %rax
je .L191
movl $0x00000000, (%rbx,%rax)
jmp .L127
.L191:
leaq 96(%rbx), %rax
movq %rbx, 48(%rsp)
movq %rax, 56(%rsp)
movq %rax, 64(%rsp)
movq $0, 120(%rsp)
movq $0, 128(%rsp)
movl $24, %edi
call _Znwm@PLT
.LEHE10:
movq %rax, 112(%rsp)
leaq 24(%rax), %rdx
movq %rdx, 128(%rsp)
movq $0, (%rax)
movq $0, 8(%rax)
movq $0, 16(%rax)
movq %rdx, 120(%rsp)
movq 88(%rsp), %r12
movq 80(%rsp), %rdx
movq %r12, %rax
subq %rdx, %rax
cmpq $24, %rax
jbe .L192
cmpq $48, %rax
jbe .L130
leaq 48(%rdx), %r13
cmpq %r13, %r12
je .L130
movq %r13, %rbp
jmp .L132
.L192:
leaq 112(%rsp), %rcx
sarq $3, %rax
movabsq $-6148914691236517205, %rdx
imulq %rdx, %rax
movl $2, %edx
subq %rax, %rdx
leaq 80(%rsp), %rdi
movq %r12, %rsi
.LEHB11:
call _ZNSt6vectorIS_IPfSaIS0_EESaIS2_EE14_M_fill_insertEN9__gnu_cxx17__normal_iteratorIPS2_S4_EEmRKS2_
.LEHE11:
jmp .L130
.L131:
addq $24, %rbp
cmpq %rbp, %r12
je .L193
.L132:
movq 0(%rbp), %rdi
testq %rdi, %rdi
je .L131
movq 16(%rbp), %rsi
subq %rdi, %rsi
call _ZdlPvm@PLT
jmp .L131
.L193:
movq %r13, 88(%rsp)
.L130:
leaq 112(%rsp), %rdi
call _ZNSt6vectorIPfSaIS0_EED1Ev
movq 80(%rsp), %rax
movq (%rax), %rax
movq %rbx, (%rax)
movq 80(%rsp), %rax
movq (%rax), %rdx
leaq 16(%rbx), %rcx
movq %rcx, 8(%rdx)
movq (%rax), %rax
leaq 32(%rbx), %rdx
movq %rdx, 16(%rax)
leaq 48(%rbx), %rsi
movq %rsi, %rdx
movl $0, %eax
.L133:
movq 80(%rsp), %rcx
movq 24(%rcx), %rcx
movq %rdx, (%rcx,%rax)
addq $8, %rax
addq $16, %rdx
cmpq $24, %rax
jne .L133
movq %rbx, 8(%rsp)
movq %rbx, %rcx
movl $0, %r13d
.L137:
movl $0, %eax
.L136:
leal 0(%r13,%rax), %edx
pxor %xmm0, %xmm0
cvtsi2ssq %rdx, %xmm0
movss %xmm0, (%rcx,%rax,4)
addq $1, %rax
cmpq $4, %rax
jne .L136
addl $4, %r13d
addq $16, %rcx
cmpl $12, %r13d
jne .L137
movl %r13d, %ecx
.L138:
movl $0, %eax
.L141:
leal (%rcx,%rax), %edx
pxor %xmm0, %xmm0
cvtsi2ssq %rdx, %xmm0
movss %xmm0, (%rsi,%rax,4)
addq $1, %rax
cmpq $4, %rax
jne .L141
addl $4, %ecx
addq $16, %rsi
cmpl $24, %ecx
jne .L138
leaq 24(%rsp), %rdi
movl $96, %esi
.LEHB12:
call cudaMalloc@PLT
movl $1, %ecx
movl $96, %edx
movq %rbx, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $2, 44(%rsp)
movl $1, 112(%rsp)
movl $2, 116(%rsp)
movl $2, 120(%rsp)
movl $2, 36(%rsp)
movl $2, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $2, %ecx
movq 112(%rsp), %rdi
movl $2, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L143
movl $4, %ecx
movl $3, %edx
movl $2, %esi
movq 24(%rsp), %rdi
call _Z46__device_stub__Z23scanTheadInformationGPUPfiiiPfiii
.L143:
movl $2, %ecx
movl $96, %edx
movq 24(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
leaq .LC3(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $2, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC4(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $3, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC4(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $4, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC5(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq $0, (%rsp)
jmp .L157
.L204:
movq (%rsp), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rbx
movl $1, %edx
leaq .LC7(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq (%rbx), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %rbp
testq %rbp, %rbp
je .L194
cmpb $0, 56(%rbp)
je .L146
movzbl 67(%rbp), %esi
.L147:
movsbl %sil, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
jmp .L195
.L194:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L196
call _ZSt16__throw_bad_castv@PLT
.L196:
call __stack_chk_fail@PLT
.L146:
movq %rbp, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq 0(%rbp), %rax
movl $10, %esi
movq %rbp, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L147
.L195:
movq %rax, %rdi
call _ZNSo5flushEv@PLT
leal -12(%r13), %r15d
movq 8(%rsp), %r12
leaq _ZSt4cout(%rip), %rbp
jmp .L148
.L202:
cmpl $3, %ebx
je .L197
movl $1, %edx
leaq .LC8(%rip), %rsi
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
jmp .L154
.L197:
movq 0(%rbp), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %r14
testq %r14, %r14
je .L198
cmpb $0, 56(%r14)
je .L152
movzbl 67(%r14), %esi
.L153:
movsbl %sil, %esi
movq %rbp, %rdi
call _ZNSo3putEc@PLT
jmp .L199
.L198:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L200
call _ZSt16__throw_bad_castv@PLT
.L177:
endbr64
movq %rax, %rbx
jmp .L173
.L200:
call __stack_chk_fail@PLT
.L152:
movq %r14, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r14), %rax
movl $10, %esi
movq %r14, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L153
.L199:
movq %rax, %rdi
call _ZNSo5flushEv@PLT
.L154:
addq $1, %rbx
cmpq $4, %rbx
je .L201
.L155:
pxor %xmm0, %xmm0
cvtss2sd (%r12,%rbx,4), %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
jmp .L202
.L201:
addl $4, %r15d
addq $16, %r12
cmpl %r13d, %r15d
je .L156
.L148:
movl $0, %ebx
jmp .L155
.L156:
addq $1, (%rsp)
movq (%rsp), %rax
addl $12, %r13d
addq $48, 8(%rsp)
cmpq $2, %rax
je .L203
.L157:
movl $26, %edx
leaq .LC6(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
jmp .L204
.L203:
leaq .LC9(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $2, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC4(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $3, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC4(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl $4, %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
leaq .LC5(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl $0, %r13d
movq $0, (%rsp)
leaq .LC8(%rip), %r15
jmp .L171
.L217:
movq (%rsp), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo9_M_insertImEERSoT_@PLT
movq %rax, %rbx
movl $1, %edx
leaq .LC7(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq (%rbx), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %rbp
testq %rbp, %rbp
je .L205
cmpb $0, 56(%rbp)
je .L160
movzbl 67(%rbp), %esi
.L161:
movsbl %sil, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
jmp .L206
.L205:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L207
call _ZSt16__throw_bad_castv@PLT
.L207:
call __stack_chk_fail@PLT
.L160:
movq %rbp, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq 0(%rbp), %rax
movl $10, %esi
movq %rbp, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L161
.L206:
movq %rax, %rdi
call _ZNSo5flushEv@PLT
jmp .L208
.L214:
cmpl $3, %ebx
je .L209
movl $1, %edx
movq %r15, %rsi
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
jmp .L168
.L209:
movq 0(%rbp), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %r14
testq %r14, %r14
je .L210
cmpb $0, 56(%r14)
je .L166
movzbl 67(%r14), %esi
.L167:
movsbl %sil, %esi
movq %rbp, %rdi
call _ZNSo3putEc@PLT
jmp .L211
.L210:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L212
call _ZSt16__throw_bad_castv@PLT
.L212:
call __stack_chk_fail@PLT
.L166:
movq %r14, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r14), %rax
movl $10, %esi
movq %r14, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L167
.L211:
movq %rax, %rdi
call _ZNSo5flushEv@PLT
.L168:
addq $1, %rbx
cmpq $4, %rbx
je .L213
.L169:
movq 80(%rsp), %rax
movq (%rax,%r13), %rax
movq (%rax,%r12), %rax
pxor %xmm0, %xmm0
cvtss2sd (%rax,%rbx,4), %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
jmp .L214
.L213:
addq $8, %r12
cmpq $24, %r12
je .L215
.L162:
movl $0, %ebx
jmp .L169
.L208:
movl $0, %r12d
leaq _ZSt4cout(%rip), %rbp
jmp .L162
.L215:
addq $1, (%rsp)
movq (%rsp), %rax
addq $24, %r13
cmpq $2, %rax
je .L216
.L171:
movl $26, %edx
leaq .LC10(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
.LEHE12:
jmp .L217
.L216:
leaq 80(%rsp), %rdi
call _ZNSt6vectorIS_IPfSaIS0_EESaIS2_EED1Ev
leaq 48(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L218
movl $0, %eax
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L178:
.cfi_restore_state
endbr64
movq %rax, %rbx
leaq 112(%rsp), %rdi
call _ZNSt6vectorIPfSaIS0_EED1Ev
.L173:
leaq 80(%rsp), %rdi
call _ZNSt6vectorIS_IPfSaIS0_EESaIS2_EED1Ev
leaq 48(%rsp), %rdi
call _ZNSt6vectorIfSaIfEED1Ev
movq 136(%rsp), %rax
subq %fs:40, %rax
je .L174
call __stack_chk_fail@PLT
.L174:
movq %rbx, %rdi
.LEHB13:
call _Unwind_Resume@PLT
.LEHE13:
.L218:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4032:
.section .gcc_except_table,"a",@progbits
.LLSDA4032:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE4032-.LLSDACSB4032
.LLSDACSB4032:
.uleb128 .LEHB10-.LFB4032
.uleb128 .LEHE10-.LEHB10
.uleb128 .L177-.LFB4032
.uleb128 0
.uleb128 .LEHB11-.LFB4032
.uleb128 .LEHE11-.LEHB11
.uleb128 .L178-.LFB4032
.uleb128 0
.uleb128 .LEHB12-.LFB4032
.uleb128 .LEHE12-.LEHB12
.uleb128 .L177-.LFB4032
.uleb128 0
.uleb128 .LEHB13-.LFB4032
.uleb128 .LEHE13-.LEHB13
.uleb128 0
.uleb128 0
.LLSDACSE4032:
.text
.size main, .-main
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //=============================================================================================
// Name : thread3dStl.cu
// Author : Jose Refojo
// Version :
// Creation date : 26-02-2014
// Copyright : Copyright belongs to Trinity Centre for High Performance Computing
// Description : This program will initialize a number of arrays stored in stl vectors,
// then it will grab data from each thread (such as thread position inside the block and block),
// save it, send it back into the main memory, and print it
//=============================================================================================
#include <iostream>
#include <stdio.h>
#include <vector>
using namespace std;
#define BLOCK_SIZE 2
__global__ void scanTheadInformationGPU(float *threadXIdsGPU,int N,int M,int L) {
int idx=blockIdx.x*blockDim.x+threadIdx.x;
int idy=blockIdx.y*blockDim.y+threadIdx.y;
int idz=blockIdx.z*blockDim.z+threadIdx.z;
if ( idx < N ) {
if ( idy < M ) {
if ( idz < L ) {
threadXIdsGPU[idx+idy*N+idz*N*M]=-(idx+idy*N+idz*N*M);
//threadXIdsGPU[idz+idy*L+idx*M*L]=-(idz+idy*L+idx*M*L);
}
}
}
}
int main() {
// pointers to host memory matrices
std::vector< float > vector1d;
std::vector< std::vector< float* > > vector3d;
float *vector1d_gpu;
// pointers to device memory matrices
//float *vectorGPU;
// N,M and L are the sizes on each dimension
int N=2,M=3,L=4,totalSize;
unsigned int ui,uj,uk;
totalSize=N*M*L;
// Allocate arrays threadIds and blockIds on host
vector1d.resize(totalSize);
vector3d.resize(N, std::vector< float* > (M));
for (ui=0;ui<N;ui++) {
for (uj=0;uj<M;uj++) {
vector3d[ui][uj]=&(vector1d[uj*L+ui*M*L]);
}
}
for (ui=0;ui<N;ui++) {
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
//vector1d[ui+uj*N+uk*N*M]=ui+uj*N+uk*N*M;
vector1d[uk+uj*L+ui*M*L]=uk+uj*L+ui*M*L;
}
}
}
// Allocate arrays threadIdsGPU and blockIdsGPU on device
cudaMalloc ((void **) &vector1d_gpu, sizeof(float)*N*M*L);
// Copy data from host memory to device memory
cudaMemcpy(vector1d_gpu, &(vector1d[0]), sizeof(float)*N*M*L, cudaMemcpyHostToDevice);
int block_size=BLOCK_SIZE;
dim3 dimBlock(block_size,block_size,block_size);
dim3 dimGrid ( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1),(M/dimBlock.y) + (!(M%dimBlock.y)?0:1),(L/dimBlock.y) + (!(L%dimBlock.y)?0:1));
// Call the kernel
scanTheadInformationGPU <<<dimGrid,dimBlock>>> (vector1d_gpu,N,M,L);
// Copy data from device memory to host memory
cudaMemcpy(&(vector1d[0]), vector1d_gpu, sizeof(float)*N*M*L, cudaMemcpyDeviceToHost);
// Print all the data about the threads
cout << "vector1d_host: (" << N << "," << M << "," << L << ")" << endl;
for (ui=0;ui<N;ui++) {
cout << "vector1d_host, slice in Z " << ui << ":" << endl;
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
cout << vector1d[uk+uj*L+ui*M*L];
if (uk==L-1) { cout << endl;} else { cout << "\t"; }
}
}
}
cout << "vector3d_host: (" << N << "," << M << "," << L << ")" << endl;
for (ui=0;ui<N;ui++) {
cout << "vector3d_host, slice in Z " << ui << ":" << endl;
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
cout << vector3d[ui][uj][uk];
if (uk==L-1) { cout << endl;} else { cout << "\t"; }
}
}
}
/*
cout << "vector1d_host:" <<endl;
for (ui=0; ui<numRows; ui++) {
for (uj=0; uj<numColumns; uj++) {
cout << input2d_host[ui][uj];
if (uj==numColumns-1) { cout << endl;} else { cout << "\t"; }
}
}
cout << endl;
*/
} | //=============================================================================================
// Name : thread3dStl.cu
// Author : Jose Refojo
// Version :
// Creation date : 26-02-2014
// Copyright : Copyright belongs to Trinity Centre for High Performance Computing
// Description : This program will initialize a number of arrays stored in stl vectors,
// then it will grab data from each thread (such as thread position inside the block and block),
// save it, send it back into the main memory, and print it
//=============================================================================================
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <vector>
using namespace std;
#define BLOCK_SIZE 2
__global__ void scanTheadInformationGPU(float *threadXIdsGPU,int N,int M,int L) {
int idx=blockIdx.x*blockDim.x+threadIdx.x;
int idy=blockIdx.y*blockDim.y+threadIdx.y;
int idz=blockIdx.z*blockDim.z+threadIdx.z;
if ( idx < N ) {
if ( idy < M ) {
if ( idz < L ) {
threadXIdsGPU[idx+idy*N+idz*N*M]=-(idx+idy*N+idz*N*M);
//threadXIdsGPU[idz+idy*L+idx*M*L]=-(idz+idy*L+idx*M*L);
}
}
}
}
int main() {
// pointers to host memory matrices
std::vector< float > vector1d;
std::vector< std::vector< float* > > vector3d;
float *vector1d_gpu;
// pointers to device memory matrices
//float *vectorGPU;
// N,M and L are the sizes on each dimension
int N=2,M=3,L=4,totalSize;
unsigned int ui,uj,uk;
totalSize=N*M*L;
// Allocate arrays threadIds and blockIds on host
vector1d.resize(totalSize);
vector3d.resize(N, std::vector< float* > (M));
for (ui=0;ui<N;ui++) {
for (uj=0;uj<M;uj++) {
vector3d[ui][uj]=&(vector1d[uj*L+ui*M*L]);
}
}
for (ui=0;ui<N;ui++) {
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
//vector1d[ui+uj*N+uk*N*M]=ui+uj*N+uk*N*M;
vector1d[uk+uj*L+ui*M*L]=uk+uj*L+ui*M*L;
}
}
}
// Allocate arrays threadIdsGPU and blockIdsGPU on device
hipMalloc ((void **) &vector1d_gpu, sizeof(float)*N*M*L);
// Copy data from host memory to device memory
hipMemcpy(vector1d_gpu, &(vector1d[0]), sizeof(float)*N*M*L, hipMemcpyHostToDevice);
int block_size=BLOCK_SIZE;
dim3 dimBlock(block_size,block_size,block_size);
dim3 dimGrid ( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1),(M/dimBlock.y) + (!(M%dimBlock.y)?0:1),(L/dimBlock.y) + (!(L%dimBlock.y)?0:1));
// Call the kernel
scanTheadInformationGPU <<<dimGrid,dimBlock>>> (vector1d_gpu,N,M,L);
// Copy data from device memory to host memory
hipMemcpy(&(vector1d[0]), vector1d_gpu, sizeof(float)*N*M*L, hipMemcpyDeviceToHost);
// Print all the data about the threads
cout << "vector1d_host: (" << N << "," << M << "," << L << ")" << endl;
for (ui=0;ui<N;ui++) {
cout << "vector1d_host, slice in Z " << ui << ":" << endl;
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
cout << vector1d[uk+uj*L+ui*M*L];
if (uk==L-1) { cout << endl;} else { cout << "\t"; }
}
}
}
cout << "vector3d_host: (" << N << "," << M << "," << L << ")" << endl;
for (ui=0;ui<N;ui++) {
cout << "vector3d_host, slice in Z " << ui << ":" << endl;
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
cout << vector3d[ui][uj][uk];
if (uk==L-1) { cout << endl;} else { cout << "\t"; }
}
}
}
/*
cout << "vector1d_host:" <<endl;
for (ui=0; ui<numRows; ui++) {
for (uj=0; uj<numColumns; uj++) {
cout << input2d_host[ui][uj];
if (uj==numColumns-1) { cout << endl;} else { cout << "\t"; }
}
}
cout << endl;
*/
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //=============================================================================================
// Name : thread3dStl.cu
// Author : Jose Refojo
// Version :
// Creation date : 26-02-2014
// Copyright : Copyright belongs to Trinity Centre for High Performance Computing
// Description : This program will initialize a number of arrays stored in stl vectors,
// then it will grab data from each thread (such as thread position inside the block and block),
// save it, send it back into the main memory, and print it
//=============================================================================================
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <vector>
using namespace std;
#define BLOCK_SIZE 2
__global__ void scanTheadInformationGPU(float *threadXIdsGPU,int N,int M,int L) {
int idx=blockIdx.x*blockDim.x+threadIdx.x;
int idy=blockIdx.y*blockDim.y+threadIdx.y;
int idz=blockIdx.z*blockDim.z+threadIdx.z;
if ( idx < N ) {
if ( idy < M ) {
if ( idz < L ) {
threadXIdsGPU[idx+idy*N+idz*N*M]=-(idx+idy*N+idz*N*M);
//threadXIdsGPU[idz+idy*L+idx*M*L]=-(idz+idy*L+idx*M*L);
}
}
}
}
int main() {
// pointers to host memory matrices
std::vector< float > vector1d;
std::vector< std::vector< float* > > vector3d;
float *vector1d_gpu;
// pointers to device memory matrices
//float *vectorGPU;
// N,M and L are the sizes on each dimension
int N=2,M=3,L=4,totalSize;
unsigned int ui,uj,uk;
totalSize=N*M*L;
// Allocate arrays threadIds and blockIds on host
vector1d.resize(totalSize);
vector3d.resize(N, std::vector< float* > (M));
for (ui=0;ui<N;ui++) {
for (uj=0;uj<M;uj++) {
vector3d[ui][uj]=&(vector1d[uj*L+ui*M*L]);
}
}
for (ui=0;ui<N;ui++) {
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
//vector1d[ui+uj*N+uk*N*M]=ui+uj*N+uk*N*M;
vector1d[uk+uj*L+ui*M*L]=uk+uj*L+ui*M*L;
}
}
}
// Allocate arrays threadIdsGPU and blockIdsGPU on device
hipMalloc ((void **) &vector1d_gpu, sizeof(float)*N*M*L);
// Copy data from host memory to device memory
hipMemcpy(vector1d_gpu, &(vector1d[0]), sizeof(float)*N*M*L, hipMemcpyHostToDevice);
int block_size=BLOCK_SIZE;
dim3 dimBlock(block_size,block_size,block_size);
dim3 dimGrid ( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1),(M/dimBlock.y) + (!(M%dimBlock.y)?0:1),(L/dimBlock.y) + (!(L%dimBlock.y)?0:1));
// Call the kernel
scanTheadInformationGPU <<<dimGrid,dimBlock>>> (vector1d_gpu,N,M,L);
// Copy data from device memory to host memory
hipMemcpy(&(vector1d[0]), vector1d_gpu, sizeof(float)*N*M*L, hipMemcpyDeviceToHost);
// Print all the data about the threads
cout << "vector1d_host: (" << N << "," << M << "," << L << ")" << endl;
for (ui=0;ui<N;ui++) {
cout << "vector1d_host, slice in Z " << ui << ":" << endl;
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
cout << vector1d[uk+uj*L+ui*M*L];
if (uk==L-1) { cout << endl;} else { cout << "\t"; }
}
}
}
cout << "vector3d_host: (" << N << "," << M << "," << L << ")" << endl;
for (ui=0;ui<N;ui++) {
cout << "vector3d_host, slice in Z " << ui << ":" << endl;
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
cout << vector3d[ui][uj][uk];
if (uk==L-1) { cout << endl;} else { cout << "\t"; }
}
}
}
/*
cout << "vector1d_host:" <<endl;
for (ui=0; ui<numRows; ui++) {
for (uj=0; uj<numColumns; uj++) {
cout << input2d_host[ui][uj];
if (uj==numColumns-1) { cout << endl;} else { cout << "\t"; }
}
}
cout << endl;
*/
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z23scanTheadInformationGPUPfiii
.globl _Z23scanTheadInformationGPUPfiii
.p2align 8
.type _Z23scanTheadInformationGPUPfiii,@function
_Z23scanTheadInformationGPUPfiii:
s_clause 0x2
s_load_b64 s[2:3], s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x8
s_load_b32 s6, s[0:1], 0x10
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
v_bfe_u32 v4, v0, 20, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s7, s2, 0xffff
s_lshr_b32 s2, s2, 16
v_mad_u64_u32 v[0:1], null, s13, s7, v[2:3]
s_and_b32 s3, s3, 0xffff
v_mad_u64_u32 v[1:2], null, s14, s2, v[3:4]
v_mad_u64_u32 v[2:3], null, s15, s3, v[4:5]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cmp_gt_i32_e32 vcc_lo, s4, v0
v_cmp_gt_i32_e64 s2, s5, v1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e64 s3, s6, v2
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
v_mad_u64_u32 v[3:4], null, v2, s5, v[1:2]
s_load_b64 s[0:1], s[0:1], 0x0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, v3, s4, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
v_sub_nc_u32_e32 v3, 0, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_cvt_f32_i32_e32 v2, v3
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23scanTheadInformationGPUPfiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 13
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 1
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 2
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z23scanTheadInformationGPUPfiii, .Lfunc_end0-_Z23scanTheadInformationGPUPfiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23scanTheadInformationGPUPfiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23scanTheadInformationGPUPfiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z23scanTheadInformationGPUPfiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002600 */
/*0020*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e680000002100 */
/*0050*/ S2R R4, SR_CTAID.Z ; /* 0x0000000000047919 */
/* 0x000ea80000002700 */
/*0060*/ S2R R7, SR_TID.Z ; /* 0x0000000000077919 */
/* 0x000ea20000002300 */
/*0070*/ IMAD R2, R2, c[0x0][0x4], R5 ; /* 0x0000010002027a24 */
/* 0x001fca00078e0205 */
/*0080*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x16c], PT ; /* 0x00005b0002007a0c */
/* 0x000fe20003f06270 */
/*0090*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0203 */
/*00a0*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x168], P0 ; /* 0x00005a0000007a0c */
/* 0x000fe20000706670 */
/*00b0*/ IMAD R3, R4, c[0x0][0x8], R7 ; /* 0x0000020004037a24 */
/* 0x004fca00078e0207 */
/*00c0*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x170], P0 ; /* 0x00005c0003007a0c */
/* 0x000fda0000706670 */
/*00d0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00e0*/ IMAD R3, R3, c[0x0][0x16c], R2 ; /* 0x00005b0003037a24 */
/* 0x000fe200078e0202 */
/*00f0*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*0100*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0110*/ IMAD R3, R3, c[0x0][0x168], R0 ; /* 0x00005a0003037a24 */
/* 0x000fca00078e0200 */
/*0120*/ IADD3 R5, -R3.reuse, RZ, RZ ; /* 0x000000ff03057210 */
/* 0x040fe40007ffe1ff */
/*0130*/ IMAD.WIDE R2, R3, R2, c[0x0][0x160] ; /* 0x0000580003027625 */
/* 0x000fc800078e0202 */
/*0140*/ I2F R5, R5 ; /* 0x0000000500057306 */
/* 0x000e240000201400 */
/*0150*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*0160*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0170*/ BRA 0x170; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z23scanTheadInformationGPUPfiii
.globl _Z23scanTheadInformationGPUPfiii
.p2align 8
.type _Z23scanTheadInformationGPUPfiii,@function
_Z23scanTheadInformationGPUPfiii:
s_clause 0x2
s_load_b64 s[2:3], s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x8
s_load_b32 s6, s[0:1], 0x10
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
v_bfe_u32 v4, v0, 20, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s7, s2, 0xffff
s_lshr_b32 s2, s2, 16
v_mad_u64_u32 v[0:1], null, s13, s7, v[2:3]
s_and_b32 s3, s3, 0xffff
v_mad_u64_u32 v[1:2], null, s14, s2, v[3:4]
v_mad_u64_u32 v[2:3], null, s15, s3, v[4:5]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cmp_gt_i32_e32 vcc_lo, s4, v0
v_cmp_gt_i32_e64 s2, s5, v1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e64 s3, s6, v2
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
v_mad_u64_u32 v[3:4], null, v2, s5, v[1:2]
s_load_b64 s[0:1], s[0:1], 0x0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, v3, s4, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
v_sub_nc_u32_e32 v3, 0, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_cvt_f32_i32_e32 v2, v3
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23scanTheadInformationGPUPfiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 13
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 1
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 2
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z23scanTheadInformationGPUPfiii, .Lfunc_end0-_Z23scanTheadInformationGPUPfiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23scanTheadInformationGPUPfiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23scanTheadInformationGPUPfiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
__global__ void isExecuted(int *dev_a, int blockid, int threadid){
if(blockIdx.x == blockid && threadIdx.x == threadid)
*dev_a = 1;
else
*dev_a = 0;
}
int main(){
// Declare variables and allocate memory on the GPU.
int a[1], *dev_a;
cudaMalloc((void**) &dev_a, sizeof(int));
// Execute kernel and copy the result to CPU memory.
isExecuted<<<100,100>>>(dev_a, 2, 4); // NOTE: INDEXING OF THREADS AND BLOCKS STARTS FROM 0.
cudaMemcpy(a, dev_a, sizeof(int), cudaMemcpyDeviceToHost);
// Print result and free dynamically allocated memory.
printf("a[0] = %d\n", a[0]); // REMEMBER: INDEXING IN C STARTS FROM 0.
cudaFree(dev_a);
} | code for sm_80
Function : _Z10isExecutedPiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e220000002100 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff037624 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e620000002500 */
/*0050*/ ISETP.NE.AND P0, PT, R2, c[0x0][0x16c], PT ; /* 0x00005b0002007a0c */
/* 0x001fe20003f05270 */
/*0060*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff027624 */
/* 0x000fc600078e00ff */
/*0070*/ ISETP.EQ.AND P0, PT, R0, c[0x0][0x168], !P0 ; /* 0x00005a0000007a0c */
/* 0x002fc80004702270 */
/*0080*/ SEL R5, RZ, 0x1, !P0 ; /* 0x00000001ff057807 */
/* 0x000fca0004000000 */
/*0090*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
// Kernel: stores a flag into *dev_a indicating whether the (blockid, threadid)
// pair names a thread of this launch. The single matching thread writes 1;
// every other thread writes 0 to the same location.
// NOTE(review): all launched threads race on *dev_a, so the matching thread's
// 1 may be overwritten by a later 0 — the final value is not guaranteed.
__global__ void isExecuted(int *dev_a, int blockid, int threadid){
if(blockIdx.x == blockid && threadIdx.x == threadid)
*dev_a = 1; // this thread is the queried (block, thread) pair
else
*dev_a = 0; // any other thread clears the flag
}
int main(){
// Declare variables and allocate memory on the GPU.
int a[1], *dev_a;
cudaMalloc((void**) &dev_a, sizeof(int));
// Execute kernel and copy the result to CPU memory.
isExecuted<<<100,100>>>(dev_a, 2, 4); // NOTE: INDEXING OF THREADS AND BLOCKS STARTS FROM 0.
cudaMemcpy(a, dev_a, sizeof(int), cudaMemcpyDeviceToHost);
// Print result and free dynamically allocated memory.
printf("a[0] = %d\n", a[0]); // REMEMBER: INDEXING IN C STARTS FROM 0.
cudaFree(dev_a);
} | .file "tmpxft_000d82f5_00000000-6_identify_naive2.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z10isExecutedPiiiPiii
.type _Z32__device_stub__Z10isExecutedPiiiPiii, @function
_Z32__device_stub__Z10isExecutedPiiiPiii:
.LFB2082:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z10isExecutedPiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z32__device_stub__Z10isExecutedPiiiPiii, .-_Z32__device_stub__Z10isExecutedPiiiPiii
.globl _Z10isExecutedPiii
.type _Z10isExecutedPiii, @function
_Z10isExecutedPiii:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z10isExecutedPiiiPiii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z10isExecutedPiii, .-_Z10isExecutedPiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "a[0] = %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $100, 24(%rsp)
movl $1, 28(%rsp)
movl $100, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movl $1, %ecx
movq 12(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
leaq 36(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq (%rsp), %rsi
call cudaMemcpy@PLT
movl 36(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L16
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movl $4, %edx
movl $2, %esi
movq (%rsp), %rdi
call _Z32__device_stub__Z10isExecutedPiiiPiii
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z10isExecutedPiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z10isExecutedPiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
// Kernel: stores a flag into *dev_a indicating whether the (blockid, threadid)
// pair names a thread of this launch. The single matching thread writes 1;
// every other thread writes 0 to the same location.
// NOTE(review): all launched threads race on *dev_a, so the matching thread's
// 1 may be overwritten by a later 0 — the final value is not guaranteed.
__global__ void isExecuted(int *dev_a, int blockid, int threadid){
if(blockIdx.x == blockid && threadIdx.x == threadid)
*dev_a = 1; // this thread is the queried (block, thread) pair
else
*dev_a = 0; // any other thread clears the flag
}
int main(){
// Declare variables and allocate memory on the GPU.
int a[1], *dev_a;
cudaMalloc((void**) &dev_a, sizeof(int));
// Execute kernel and copy the result to CPU memory.
isExecuted<<<100,100>>>(dev_a, 2, 4); // NOTE: INDEXING OF THREADS AND BLOCKS STARTS FROM 0.
cudaMemcpy(a, dev_a, sizeof(int), cudaMemcpyDeviceToHost);
// Print result and free dynamically allocated memory.
printf("a[0] = %d\n", a[0]); // REMEMBER: INDEXING IN C STARTS FROM 0.
cudaFree(dev_a);
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
// Kernel: stores a flag into *dev_a indicating whether the (blockid, threadid)
// pair names a thread of this launch. The single matching thread writes 1;
// every other thread writes 0 to the same location.
// NOTE(review): all launched threads race on *dev_a, so the matching thread's
// 1 may be overwritten by a later 0 — the final value is not guaranteed.
__global__ void isExecuted(int *dev_a, int blockid, int threadid){
if(blockIdx.x == blockid && threadIdx.x == threadid)
*dev_a = 1; // this thread is the queried (block, thread) pair
else
*dev_a = 0; // any other thread clears the flag
}
int main(){
// Declare variables and allocate memory on the GPU.
int a[1], *dev_a;
hipMalloc((void**) &dev_a, sizeof(int));
// Execute kernel and copy the result to CPU memory.
isExecuted<<<100,100>>>(dev_a, 2, 4); // NOTE: INDEXING OF THREADS AND BLOCKS STARTS FROM 0.
hipMemcpy(a, dev_a, sizeof(int), hipMemcpyDeviceToHost);
// Print result and free dynamically allocated memory.
printf("a[0] = %d\n", a[0]); // REMEMBER: INDEXING IN C STARTS FROM 0.
hipFree(dev_a);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
// Kernel: stores a flag into *dev_a indicating whether the (blockid, threadid)
// pair names a thread of this launch. The single matching thread writes 1;
// every other thread writes 0 to the same location.
// NOTE(review): all launched threads race on *dev_a, so the matching thread's
// 1 may be overwritten by a later 0 — the final value is not guaranteed.
__global__ void isExecuted(int *dev_a, int blockid, int threadid){
if(blockIdx.x == blockid && threadIdx.x == threadid)
*dev_a = 1; // this thread is the queried (block, thread) pair
else
*dev_a = 0; // any other thread clears the flag
}
int main(){
// Declare variables and allocate memory on the GPU.
int a[1], *dev_a;
hipMalloc((void**) &dev_a, sizeof(int));
// Execute kernel and copy the result to CPU memory.
isExecuted<<<100,100>>>(dev_a, 2, 4); // NOTE: INDEXING OF THREADS AND BLOCKS STARTS FROM 0.
hipMemcpy(a, dev_a, sizeof(int), hipMemcpyDeviceToHost);
// Print result and free dynamically allocated memory.
printf("a[0] = %d\n", a[0]); // REMEMBER: INDEXING IN C STARTS FROM 0.
hipFree(dev_a);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10isExecutedPiii
.globl _Z10isExecutedPiii
.p2align 8
.type _Z10isExecutedPiii,@function
_Z10isExecutedPiii:
s_load_b128 s[0:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, s3, v0
s_cmp_eq_u32 s15, s2
v_mov_b32_e32 v0, 0
s_cselect_b32 s2, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, vcc_lo
v_cndmask_b32_e64 v1, 0, 1, s2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10isExecutedPiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 16
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10isExecutedPiii, .Lfunc_end0-_Z10isExecutedPiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 16
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10isExecutedPiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10isExecutedPiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
// Kernel: stores a flag into *dev_a indicating whether the (blockid, threadid)
// pair names a thread of this launch. The single matching thread writes 1;
// every other thread writes 0 to the same location.
// NOTE(review): all launched threads race on *dev_a, so the matching thread's
// 1 may be overwritten by a later 0 — the final value is not guaranteed.
__global__ void isExecuted(int *dev_a, int blockid, int threadid){
if(blockIdx.x == blockid && threadIdx.x == threadid)
*dev_a = 1; // this thread is the queried (block, thread) pair
else
*dev_a = 0; // any other thread clears the flag
}
int main(){
// Declare variables and allocate memory on the GPU.
int a[1], *dev_a;
hipMalloc((void**) &dev_a, sizeof(int));
// Execute kernel and copy the result to CPU memory.
isExecuted<<<100,100>>>(dev_a, 2, 4); // NOTE: INDEXING OF THREADS AND BLOCKS STARTS FROM 0.
hipMemcpy(a, dev_a, sizeof(int), hipMemcpyDeviceToHost);
// Print result and free dynamically allocated memory.
printf("a[0] = %d\n", a[0]); // REMEMBER: INDEXING IN C STARTS FROM 0.
hipFree(dev_a);
} | .text
.file "identify_naive2.hip"
.globl _Z25__device_stub__isExecutedPiii # -- Begin function _Z25__device_stub__isExecutedPiii
.p2align 4, 0x90
.type _Z25__device_stub__isExecutedPiii,@function
_Z25__device_stub__isExecutedPiii: # @_Z25__device_stub__isExecutedPiii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
movq %rsp, %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z10isExecutedPiii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z25__device_stub__isExecutedPiii, .Lfunc_end0-_Z25__device_stub__isExecutedPiii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rsp, %rdi
movl $4, %esi
callq hipMalloc
movabsq $4294967396, %rdi # imm = 0x100000064
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq (%rsp), %rax
movq %rax, 96(%rsp)
movl $2, 12(%rsp)
movl $4, 8(%rsp)
leaq 96(%rsp), %rax
movq %rax, 16(%rsp)
leaq 12(%rsp), %rax
movq %rax, 24(%rsp)
leaq 8(%rsp), %rax
movq %rax, 32(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z10isExecutedPiii, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq (%rsp), %rsi
leaq 16(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl 16(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $104, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10isExecutedPiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10isExecutedPiii,@object # @_Z10isExecutedPiii
.section .rodata,"a",@progbits
.globl _Z10isExecutedPiii
.p2align 3, 0x0
_Z10isExecutedPiii:
.quad _Z25__device_stub__isExecutedPiii
.size _Z10isExecutedPiii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "a[0] = %d\n"
.size .L.str, 11
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10isExecutedPiii"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__isExecutedPiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10isExecutedPiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z10isExecutedPiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e220000002100 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff037624 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e620000002500 */
/*0050*/ ISETP.NE.AND P0, PT, R2, c[0x0][0x16c], PT ; /* 0x00005b0002007a0c */
/* 0x001fe20003f05270 */
/*0060*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff027624 */
/* 0x000fc600078e00ff */
/*0070*/ ISETP.EQ.AND P0, PT, R0, c[0x0][0x168], !P0 ; /* 0x00005a0000007a0c */
/* 0x002fc80004702270 */
/*0080*/ SEL R5, RZ, 0x1, !P0 ; /* 0x00000001ff057807 */
/* 0x000fca0004000000 */
/*0090*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10isExecutedPiii
.globl _Z10isExecutedPiii
.p2align 8
.type _Z10isExecutedPiii,@function
_Z10isExecutedPiii:
s_load_b128 s[0:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, s3, v0
s_cmp_eq_u32 s15, s2
v_mov_b32_e32 v0, 0
s_cselect_b32 s2, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, s2, vcc_lo
v_cndmask_b32_e64 v1, 0, 1, s2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10isExecutedPiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 16
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10isExecutedPiii, .Lfunc_end0-_Z10isExecutedPiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 16
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10isExecutedPiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10isExecutedPiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000d82f5_00000000-6_identify_naive2.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z10isExecutedPiiiPiii
.type _Z32__device_stub__Z10isExecutedPiiiPiii, @function
_Z32__device_stub__Z10isExecutedPiiiPiii:
.LFB2082:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z10isExecutedPiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z32__device_stub__Z10isExecutedPiiiPiii, .-_Z32__device_stub__Z10isExecutedPiiiPiii
.globl _Z10isExecutedPiii
.type _Z10isExecutedPiii, @function
_Z10isExecutedPiii:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z10isExecutedPiiiPiii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z10isExecutedPiii, .-_Z10isExecutedPiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "a[0] = %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $100, 24(%rsp)
movl $1, 28(%rsp)
movl $100, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movl $1, %ecx
movq 12(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
leaq 36(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq (%rsp), %rsi
call cudaMemcpy@PLT
movl 36(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L16
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movl $4, %edx
movl $2, %esi
movq (%rsp), %rdi
call _Z32__device_stub__Z10isExecutedPiiiPiii
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z10isExecutedPiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z10isExecutedPiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "identify_naive2.hip"
.globl _Z25__device_stub__isExecutedPiii # -- Begin function _Z25__device_stub__isExecutedPiii
.p2align 4, 0x90
.type _Z25__device_stub__isExecutedPiii,@function
_Z25__device_stub__isExecutedPiii: # @_Z25__device_stub__isExecutedPiii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
movq %rsp, %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z10isExecutedPiii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z25__device_stub__isExecutedPiii, .Lfunc_end0-_Z25__device_stub__isExecutedPiii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rsp, %rdi
movl $4, %esi
callq hipMalloc
movabsq $4294967396, %rdi # imm = 0x100000064
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq (%rsp), %rax
movq %rax, 96(%rsp)
movl $2, 12(%rsp)
movl $4, 8(%rsp)
leaq 96(%rsp), %rax
movq %rax, 16(%rsp)
leaq 12(%rsp), %rax
movq %rax, 24(%rsp)
leaq 8(%rsp), %rax
movq %rax, 32(%rsp)
leaq 80(%rsp), %rdi
leaq 64(%rsp), %rsi
leaq 56(%rsp), %rdx
leaq 48(%rsp), %rcx
callq __hipPopCallConfiguration
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
movq 64(%rsp), %rcx
movl 72(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z10isExecutedPiii, %edi
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq (%rsp), %rsi
leaq 16(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl 16(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $104, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10isExecutedPiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10isExecutedPiii,@object # @_Z10isExecutedPiii
.section .rodata,"a",@progbits
.globl _Z10isExecutedPiii
.p2align 3, 0x0
_Z10isExecutedPiii:
.quad _Z25__device_stub__isExecutedPiii
.size _Z10isExecutedPiii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "a[0] = %d\n"
.size .L.str, 11
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10isExecutedPiii"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__isExecutedPiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10isExecutedPiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <assert.h>
#include <vector>
using namespace std;
const int INF = 10000000;
const int V = 10010;
const int MAX_THREAD_DIM2 = 32;
void input(char *inFileName, int B);
void output(char *outFileName);
void block_FW_2GPU(int B);
int ceil(int a, int b);
void calAsync(int gpuId, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height);
int realn;
int n, m; // Number of vertices, edges
int* Dist; // n * n, on host
int* dDist[2]; // n * n, on device
int streamSize[2];
vector<cudaStream_t> streams[2];
// Return the index of the CUDA device currently selected on this host
// thread (thin wrapper around cudaGetDevice).
int getGPUId ()
{
	int device;
	cudaGetDevice(&device);
	return device;
}
// Hand out a stream on `gpuId` for the next launch.
// `streams[gpuId]` is a grow-only pool; `streamSize[gpuId]` counts how many
// pool entries are handed out in the current phase.  syncAllStreams()
// resets the counters to 0, so existing streams are reused next phase.
cudaStream_t getIdleStream (int gpuId)
{
	cudaSetDevice(gpuId);
	// Pool exhausted: create a new stream and remember it.
	if(streams[gpuId].size() == streamSize[gpuId])
	{
		cudaStream_t stm;
		cudaStreamCreate(&stm);
		streams[gpuId].push_back(stm);
		streamSize[gpuId]++;
		return stm;
	}
	else
		// Reuse an already-created (currently idle) stream.
		return streams[gpuId][streamSize[gpuId]++];
}
void syncAllStreams ()
{
cudaThreadSynchronize();
streamSize[0] = 0;
streamSize[1] = 0;
}
// Queue one cudaMemcpyAsync per matrix row: for every row in block-rows
// [bi0, bi1) copy the span covering block-columns [bj0, bj1).  The row
// pitch is the padded matrix width n.
void blockCopyAsync (int gpuId, int* dst, const int* src, cudaMemcpyKind kind, cudaStream_t stream, int B, int bi0, int bi1, int bj0, int bj1)
{
	cudaSetDevice(gpuId);
	const int rowEnd = bi1 * B;                          // last row (exclusive)
	const int rowBytes = (bj1 - bj0) * B * sizeof(int);  // bytes per row span
	for(int row = bi0 * B; row < rowEnd; ++row)
	{
		const int base = row * n + bj0 * B;          // offset of span start
		cudaMemcpyAsync(dst + base, src + base, rowBytes, kind, stream);
	}
}
// Entry point.
// Usage: <prog> <input file> <output file> <block size B>
// Previously argv[3] was dereferenced without checking argc (undefined
// behavior on missing arguments) and B was not validated (B == 0 divides
// by zero inside ceil()).
int main(int argc, char* argv[])
{
	if (argc < 4) {
		fprintf(stderr, "usage: %s input_file output_file block_size\n", argv[0]);
		return 1;
	}
	int B = atoi(argv[3]);
	if (B <= 0) {
		fprintf(stderr, "block_size must be a positive integer\n");
		return 1;
	}
	input(argv[1], B);
	block_FW_2GPU(B);
	output(argv[2]);
	return 0;
}
// Read the graph: first line "realn m", then m edges "a b v" (1-indexed).
// The working size n is realn rounded up to a multiple of B so every tile
// is full; Dist is allocated as managed memory and initialized to
// 0 on the diagonal and INF elsewhere.
// Fixes: the FILE handle was previously leaked, and fopen failure was
// not detected (NULL dereference on a bad path).
void input(char *inFileName, int B)
{
	FILE *infile = fopen(inFileName, "r");
	if (infile == NULL) {
		fprintf(stderr, "cannot open input file: %s\n", inFileName);
		exit(1);
	}
	fscanf(infile, "%d %d", &realn, &m);
	n = ceil(realn, B) * B;                 // pad to a multiple of B
	cudaMallocManaged(&Dist, n * n * sizeof(int));
	for (int i = 0, k = 0; i < n; ++i) {
		for (int j = 0; j < n; ++j, ++k) {
			if (i == j) Dist[k] = 0;
			else Dist[k] = INF;
		}
	}
	while (--m >= 0) {
		int a, b, v;
		fscanf(infile, "%d %d %d", &a, &b, &v);
		--a, --b;                       // input vertices are 1-indexed
		Dist[a * n + b] = v;
	}
	fclose(infile);                         // was leaked before
}
// Write the realn x realn result to outFileName; entries >= INF print as
// "INF ".  Also releases the managed Dist buffer.
// Fixes: the FILE handle was previously leaked (results might never be
// flushed), and fopen failure was not detected.
void output(char *outFileName)
{
	FILE *outfile = fopen(outFileName, "w");
	if (outfile == NULL) {
		fprintf(stderr, "cannot open output file: %s\n", outFileName);
		exit(1);
	}
	for (int i = 0; i < realn; ++i) {
		for (int j = 0; j < realn; ++j) {
			int d = Dist[i * n + j];
			if (d >= INF) fprintf(outfile, "INF ");
			else fprintf(outfile, "%d ", d);
		}
		fprintf(outfile, "\n");
	}
	fclose(outfile);                        // was leaked before
	cudaFree(Dist);
}
// Debug dump of the realn x realn distance matrix to stderr; values that
// reached INF (or the padding) print as "INF ".
void print ()
{
	for (int row = 0; row < realn; ++row) {
		for (int col = 0; col < realn; ++col) {
			const int d = Dist[row * n + col];
			if (d < INF) fprintf(stderr, "%d ", d);
			else fprintf(stderr, "INF ");
		}
		fprintf(stderr, "\n");
	}
	fprintf(stderr, "\n");
}
// Integer ceiling division for positive b: the number of size-b chunks
// needed to cover a items.  (Shadows std::ceil deliberately.)
int ceil(int a, int b)
{
	const int biased = a + (b - 1);
	return biased / b;
}
// Blocked Floyd-Warshall across two GPUs on managed memory.
// Round r fixes the pivot block (r,r) and proceeds in three dependency
// phases, with syncAllStreams() as the barrier between them:
//   phase 1: the pivot block itself (GPU 0)
//   phase 2: the pivot row and pivot column (row on GPU 0, column on GPU 1)
//   phase 3: the four remaining quadrants, split between the GPUs
void block_FW_2GPU(int B)
{
	int round = ceil(n, B);
	for (int r = 0; r < round; ++r) {
		/* Phase 1*/
		fprintf(stderr, "Round: %d\n", r);
		calAsync(0, B, r, r, r, 1, 1);
		syncAllStreams();
		/* Phase 2*/
		calAsync(0, B, r, r, 0, r, 1); // L 0
		calAsync(0, B, r, r, r +1, round - r -1, 1); // R 0
		calAsync(1, B, r, 0, r, 1, r); // U 1
		calAsync(1, B, r, r +1, r, 1, round - r -1); // D 1
		syncAllStreams();
		/* Phase 3*/
		calAsync(0, B, r, 0, 0, r, r); // <^
		calAsync(1, B, r, 0, r +1, round -r -1, r); // ^>
		calAsync(1, B, r, r +1, 0, r, round - r -1); // <v
		calAsync(0, B, r, r +1, r +1, round -r -1, round - r -1); // v>
		syncAllStreams();
	}
}
// Single Floyd-Warshall relaxation step for intermediate vertex k over
// the sub-matrix [i0,i1) x [j0,j1): D(i,j) = min(D(i,j), D(i,k) + D(k,j)).
// One thread per cell; threads outside the rectangle exit early.
__global__
void Update (int k, int i0, int j0, int i1, int j1, int* dDist, int n)
{
#define D(i,j) (dDist[(i) * n + (j)])
	const int row = i0 + blockIdx.x * blockDim.x + threadIdx.x;
	const int col = j0 + blockIdx.y * blockDim.y + threadIdx.y;
	if(row >= i1 || col >= j1)
		return;
	const int viaK = D(row, k) + D(k, col);
	if (viaK < D(row, col))
		D(row, col) = viaK;
}
// Floyd-Warshall relaxation for a tile whose rows AND columns are both
// outside the pivot row/column, so the entire k range [k0, k1) can be
// applied in one launch.  Si caches D(i, k..k+31) for this block's rows
// and Sj caches D(k..k+31, j) for its columns; dkmod walks through the
// cached slice and a refill (bracketed by barriers) happens every
// cacheSize iterations.
// NOTE(review): the refill indexing uses ty/tx as the slice offset, which
// assumes a full 32x32 thread block (MAX_THREAD_DIM2 in both axes) — that
// is what calAsync launches; confirm before changing the launch config.
__global__
void UpdateIndependent (int k0, int k1, int i0, int j0, int i1, int j1, int* dDist, int n)
{
#define D(i,j) (dDist[(i) * n + (j)])
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	int di = blockDim.x * blockIdx.x + tx;
	int dj = blockDim.y * blockIdx.y + ty;
	int i = i0 + di;
	int j = j0 + dj;
	bool valid = i < i1 && j < j1;   // this thread owns a real cell
	__shared__ int Si[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
	__shared__ int Sj[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
	const int cacheSize = MAX_THREAD_DIM2;
	int Dij = valid? D(i, j): 0;     // running minimum, written back once at the end
	int dkmod = 0;                   // offset of k within the cached slice
	for(int k = k0; k < k1; ++k)
	{
		if(dkmod == 0)
		{
			// Refill both caches for the slice [k, k + cacheSize).
			// dkmod is uniform across the block, so every thread
			// reaches both barriers together.
			__syncthreads();
			if(i < i1 && k+ty < k1)
				Si[ty][tx] = D(i, k+ty);
			if(j < j1 && k+tx < k1)
				Sj[tx][ty] = D(k+tx, j);
			__syncthreads();
		}
		if(valid)
		{
			// assert(Si[tx][dkmod] == D(i,k));
			// assert(Sj[dkmod][ty] == D(k,j));
			// int Dik = D(i, k);
			// int Dkj = D(k, j);
			int Dik = Si[dkmod][tx];
			int Dkj = Sj[dkmod][ty];
			int D1 = Dik + Dkj;
			if (D1 < Dij)
				Dij = D1;
		}
		dkmod = (dkmod + 1) % cacheSize;
	}
	if(valid)
		D(i, j) = Dij;
}
// Enqueue the round-`Round` update for the rectangle of blocks
// [block_start_x, block_start_x + block_height) x
// [block_start_y, block_start_y + block_width) on device `gpuId`.
// Blocks that share a row or column with the pivot block must apply each
// k sequentially (one Update launch per k); all other blocks batch the
// whole k range into a single UpdateIndependent launch.  Work goes onto
// a stream taken from the per-GPU pool, so calls overlap until the next
// syncAllStreams().
void calAsync(int gpuId, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height)
{
	cudaSetDevice(gpuId);
	int block_end_x = block_start_x + block_height;
	int block_end_y = block_start_y + block_width;
	for (int b_i = block_start_x; b_i < block_end_x; ++b_i) {
		for (int b_j = block_start_y; b_j < block_end_y; ++b_j) {
			// To calculate B*B elements in the block (b_i, b_j)
			// For each block, it need to compute B times
			// for (int k = Round * B; k < (Round +1) * B && k < n; ++k) {
			// To calculate original index of elements in the block (b_i, b_j)
			// For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2
			// Element range [i0,i1) x [j0,j1) covered by this block,
			// clamped to the matrix edge.
			int i0 = b_i * B;
			int i1 = min((b_i +1) * B, n);
			int j0 = b_j * B;
			int j1 = min((b_j +1) * B, n);
			// Intermediate-vertex range handled this round.
			int k0 = Round * B;
			int k1 = min((Round +1) * B, n);
			// Pivot-row / pivot-column blocks need per-k ordering.
			bool iDepends = i0 == k0;
			bool jDepends = j0 == k0;
			int threadDim = MAX_THREAD_DIM2;//std::min(B, MAX_THREAD_DIM2);
			int blockDim = (B + MAX_THREAD_DIM2 - 1) / MAX_THREAD_DIM2;
			dim3 grid(blockDim, blockDim), block(threadDim, threadDim);
			cudaStream_t stm = getIdleStream(gpuId);
			if(iDepends || jDepends)
			{
				for(int k=k0; k<k1; ++k)
					Update<<<grid, block, 0, stm>>>(k, i0, j0, i1, j1, Dist, n);
			}
			else
				UpdateIndependent<<<grid, block, 0, stm>>>(k0, k1, i0, j0, i1, j1, Dist, n);
			// for (int i = i0; i < i1; ++i) {
			// for (int j = j0; j < j1; ++j) {
			// if (Dist[i][k] + Dist[k][j] < Dist[i][j])
			// Dist[i][j] = Dist[i][k] + Dist[k][j];
			// }
			// }
			// }
		}
	}
} | code for sm_80
Function : _Z17UpdateIndependentiiiiiiPii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e220000002600 */
/*0020*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff0a7624 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0040*/ IMAD.MOV.U32 R15, RZ, RZ, 0x4 ; /* 0x00000004ff0f7424 */
/* 0x000fe200078e00ff */
/*0050*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000e220000002200 */
/*0060*/ BSSY B0, 0x160 ; /* 0x000000f000007945 */
/* 0x000fe20003800000 */
/*0070*/ ISETP.GE.AND P1, PT, R10, c[0x0][0x164], PT ; /* 0x000059000a007a0c */
/* 0x000fe20003f26270 */
/*0080*/ IMAD.MOV.U32 R6, RZ, RZ, RZ ; /* 0x000000ffff067224 */
/* 0x000fe200078e00ff */
/*0090*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e680000002500 */
/*00a0*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*00b0*/ IMAD R4, R0, c[0x0][0x4], R3 ; /* 0x0000010000047a24 */
/* 0x001fca00078e0203 */
/*00c0*/ IADD3 R4, R4, c[0x0][0x16c], RZ ; /* 0x00005b0004047a10 */
/* 0x000fe20007ffe0ff */
/*00d0*/ IMAD R7, R2, c[0x0][0x0], R5 ; /* 0x0000000002077a24 */
/* 0x002fc600078e0205 */
/*00e0*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x174], PT ; /* 0x00005d0004007a0c */
/* 0x000fe40003f06270 */
/*00f0*/ IADD3 R7, R7, c[0x0][0x168], RZ ; /* 0x00005a0007077a10 */
/* 0x000fc80007ffe0ff */
/*0100*/ ISETP.LT.AND P0, PT, R7.reuse, c[0x0][0x170], !P0 ; /* 0x00005c0007007a0c */
/* 0x040fe20004701270 */
/*0110*/ IMAD R12, R7, c[0x0][0x180], R4 ; /* 0x00006000070c7a24 */
/* 0x000fc800078e0204 */
/*0120*/ IMAD.WIDE R12, R12, R15, c[0x0][0x178] ; /* 0x00005e000c0c7625 */
/* 0x000fd000078e020f */
/*0130*/ @!P0 BRA 0x150 ; /* 0x0000001000008947 */
/* 0x000fea0003800000 */
/*0140*/ LDG.E R6, [R12.64] ; /* 0x000000060c067981 */
/* 0x000164000c1e1900 */
/*0150*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0160*/ ULDC UR4, c[0x0][0x160] ; /* 0x0000580000047ab9 */
/* 0x000fe20000000800 */
/*0170*/ @P1 BRA 0xe20 ; /* 0x00000ca000001947 */
/* 0x000fea0003800000 */
/*0180*/ LOP3.LUT R8, RZ, c[0x0][0x160], RZ, 0x33, !PT ; /* 0x00005800ff087a12 */
/* 0x000fe200078e33ff */
/*0190*/ IMAD.SHL.U32 R9, R3, 0x4, RZ ; /* 0x0000000403097824 */
/* 0x000fe200078e00ff */
/*01a0*/ LEA R14, R5, 0x1000, 0x7 ; /* 0x00001000050e7811 */
/* 0x000fe200078e38ff */
/*01b0*/ IMAD.MOV.U32 R28, RZ, RZ, RZ ; /* 0x000000ffff1c7224 */
/* 0x000fe200078e00ff */
/*01c0*/ IADD3 R8, R8, c[0x0][0x164], RZ ; /* 0x0000590008087a10 */
/* 0x000fe40007ffe0ff */
/*01d0*/ IADD3 R10, -R10, c[0x0][0x164], RZ ; /* 0x000059000a0a7a10 */
/* 0x000fe20007ffe1ff */
/*01e0*/ IMAD R11, R3, 0x4, R14 ; /* 0x00000004030b7824 */
/* 0x000fe200078e020e */
/*01f0*/ ISETP.GE.U32.AND P1, PT, R8, 0x3, PT ; /* 0x000000030800780c */
/* 0x000fe20003f26070 */
/*0200*/ IMAD.SHL.U32 R8, R5, 0x4, RZ ; /* 0x0000000405087824 */
/* 0x000fe200078e00ff */
/*0210*/ LOP3.LUT R10, R10, 0x3, RZ, 0xc0, !PT ; /* 0x000000030a0a7812 */
/* 0x000fc600078ec0ff */
/*0220*/ IMAD R18, R3, 0x80, R8 ; /* 0x0000008003127824 */
/* 0x000fd000078e0208 */
/*0230*/ @!P1 BRA 0xb40 ; /* 0x0000090000009947 */
/* 0x000fea0003800000 */
/*0240*/ IADD3 R17, R5, c[0x0][0x168], RZ ; /* 0x00005a0005117a10 */
/* 0x000fe20007ffe0ff */
/*0250*/ IMAD.MOV.U32 R25, RZ, RZ, c[0x0][0x4] ; /* 0x00000100ff197624 */
/* 0x000fe200078e00ff */
/*0260*/ IADD3 R23, R3.reuse, c[0x0][0x160], RZ ; /* 0x0000580003177a10 */
/* 0x040fe20007ffe0ff */
/*0270*/ IMAD.MOV.U32 R28, RZ, RZ, RZ ; /* 0x000000ffff1c7224 */
/* 0x000fe200078e00ff */
/*0280*/ IADD3 R21, R3, c[0x0][0x16c], RZ ; /* 0x00005b0003157a10 */
/* 0x000fe20007ffe0ff */
/*0290*/ IMAD R14, R2, c[0x0][0x0], R17 ; /* 0x00000000020e7a24 */
/* 0x000fe200078e0211 */
/*02a0*/ IADD3 R19, R5, 0x1, RZ ; /* 0x0000000105137810 */
/* 0x000fe20007ffe0ff */
/*02b0*/ IMAD R20, R0, R25, c[0x0][0x16c] ; /* 0x00005b0000147624 */
/* 0x000fe200078e0219 */
/*02c0*/ IADD3 R22, R3, 0x1, RZ ; /* 0x0000000103167810 */
/* 0x000fe20007ffe0ff */
/*02d0*/ IMAD R14, R14, c[0x0][0x180], R23 ; /* 0x000060000e0e7a24 */
/* 0x000fe200078e0217 */
/*02e0*/ IADD3 R23, R23, 0x3, RZ ; /* 0x0000000317177810 */
/* 0x000fe20007ffe0ff */
/*02f0*/ IMAD R26, R0, c[0x0][0x4], R21 ; /* 0x00000100001a7a24 */
/* 0x000fc400078e0215 */
/*0300*/ IMAD.WIDE R14, R14, R15, c[0x0][0x178] ; /* 0x00005e000e0e7625 */
/* 0x000fc800078e020f */
/*0310*/ IMAD.MOV.U32 R17, RZ, RZ, R15 ; /* 0x000000ffff117224 */
/* 0x000fe200078e000f */
/*0320*/ IADD3 R15, R5, c[0x0][0x160], RZ ; /* 0x00005800050f7a10 */
/* 0x000fc80007ffe0ff */
/*0330*/ IADD3 R25, R15.reuse, 0x2, RZ ; /* 0x000000020f197810 */
/* 0x040fe20007ffe0ff */
/*0340*/ IMAD R20, R15.reuse, c[0x0][0x180], R20 ; /* 0x000060000f147a24 */
/* 0x040fe200078e0214 */
/*0350*/ IADD3 R27, R15.reuse, 0x1, RZ ; /* 0x000000010f1b7810 */
/* 0x040fe40007ffe0ff */
/*0360*/ IADD3 R21, R15, 0x3, RZ ; /* 0x000000030f157810 */
/* 0x000fe20007ffe0ff */
/*0370*/ IMAD R24, R25, c[0x0][0x180], R26.reuse ; /* 0x0000600019187a24 */
/* 0x100fe400078e021a */
/*0380*/ IMAD R25, R27, c[0x0][0x180], R26.reuse ; /* 0x000060001b197a24 */
/* 0x100fe200078e021a */
/*0390*/ IADD3 R27, R10, -c[0x0][0x164], RZ ; /* 0x800059000a1b7a10 */
/* 0x000fe20007ffe0ff */
/*03a0*/ IMAD R26, R21, c[0x0][0x180], R26 ; /* 0x00006000151a7a24 */
/* 0x000fe400078e021a */
/*03b0*/ ISETP.NE.AND P1, PT, R28, RZ, PT ; /* 0x000000ff1c00720c */
/* 0x000fe20003f25270 */
/*03c0*/ IMAD.MOV.U32 R15, RZ, RZ, R17 ; /* 0x000000ffff0f7224 */
/* 0x000fd800078e0011 */
/*03d0*/ @P1 BRA 0x4d0 ; /* 0x000000f000001947 */
/* 0x000fea0003800000 */
/*03e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*03f0*/ IADD3 R16, R3, UR4, RZ ; /* 0x0000000403107c10 */
/* 0x000fc8000fffe0ff */
/*0400*/ ISETP.GE.AND P1, PT, R16, c[0x0][0x164], PT ; /* 0x0000590010007a0c */
/* 0x000fc80003f26270 */
/*0410*/ ISETP.GE.OR P1, PT, R7, c[0x0][0x170], P1 ; /* 0x00005c0007007a0c */
/* 0x000fda0000f26670 */
/*0420*/ @!P1 LDG.E R29, [R14.64] ; /* 0x000000060e1d9981 */
/* 0x000ea2000c1e1900 */
/*0430*/ IADD3 R16, R5, UR4, RZ ; /* 0x0000000405107c10 */
/* 0x000fc6000fffe0ff */
/*0440*/ @!P1 STS [R18], R29 ; /* 0x0000001d12009388 */
/* 0x0043e20000000800 */
/*0450*/ ISETP.GE.AND P1, PT, R16, c[0x0][0x164], PT ; /* 0x0000590010007a0c */
/* 0x000fc80003f26270 */
/*0460*/ ISETP.GE.OR P1, PT, R4, c[0x0][0x174], P1 ; /* 0x00005d0004007a0c */
/* 0x000fda0000f26670 */
/*0470*/ @!P1 IMAD.IADD R16, R3, 0x1, R20 ; /* 0x0000000103109824 */
/* 0x000fe400078e0214 */
/*0480*/ @!P1 IMAD.MOV.U32 R17, RZ, RZ, 0x4 ; /* 0x00000004ff119424 */
/* 0x000fc800078e00ff */
/*0490*/ @!P1 IMAD.WIDE R16, R16, R17, c[0x0][0x178] ; /* 0x00005e0010109625 */
/* 0x000fcc00078e0211 */
/*04a0*/ @!P1 LDG.E R16, [R16.64] ; /* 0x0000000610109981 */
/* 0x000ea8000c1e1900 */
/*04b0*/ @!P1 STS [R11], R16 ; /* 0x000000100b009388 */
/* 0x0043e80000000800 */
/*04c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*04d0*/ @P0 IMAD R16, R28.reuse, 0x80, R8 ; /* 0x000000801c100824 */
/* 0x042fe400078e0208 */
/*04e0*/ @P0 IMAD R29, R28.reuse, 0x80, R9 ; /* 0x000000801c1d0824 */
/* 0x040fe200078e0209 */
/*04f0*/ IADD3 R28, R28, 0x1, RZ ; /* 0x000000011c1c7810 */
/* 0x000fc60007ffe0ff */
/*0500*/ @P0 LDS R16, [R16] ; /* 0x0000000010100984 */
/* 0x000fe80000000800 */
/*0510*/ @P0 LDS R17, [R29+0x1000] ; /* 0x001000001d110984 */
/* 0x000e640000000800 */
/*0520*/ @P0 IMAD.IADD R17, R17, 0x1, R16 ; /* 0x0000000111110824 */
/* 0x002fca00078e0210 */
/*0530*/ @P0 IMNMX R6, R6, R17, PT ; /* 0x0000001106060217 */
/* 0x020fe40003800200 */
/*0540*/ SHF.R.S32.HI R17, RZ, 0x1f, R28 ; /* 0x0000001fff117819 */
/* 0x000fc8000001141c */
/*0550*/ LEA.HI R17, R17, R28, RZ, 0x5 ; /* 0x0000001c11117211 */
/* 0x000fc800078f28ff */
/*0560*/ LOP3.LUT R17, R17, 0xffffffe0, RZ, 0xc0, !PT ; /* 0xffffffe011117812 */
/* 0x000fca00078ec0ff */
/*0570*/ IMAD.IADD R28, R28, 0x1, -R17 ; /* 0x000000011c1c7824 */
/* 0x000fca00078e0a11 */
/*0580*/ ISETP.NE.AND P1, PT, R28, RZ, PT ; /* 0x000000ff1c00720c */
/* 0x000fda0003f25270 */
/*0590*/ @P1 BRA 0x680 ; /* 0x000000e000001947 */
/* 0x000fea0003800000 */
/*05a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*05b0*/ IADD3 R16, R22, UR4, RZ ; /* 0x0000000416107c10 */
/* 0x000fc8000fffe0ff */
/*05c0*/ ISETP.GE.AND P1, PT, R16, c[0x0][0x164], PT ; /* 0x0000590010007a0c */
/* 0x000fc80003f26270 */
/*05d0*/ ISETP.GE.OR P1, PT, R7, c[0x0][0x170], P1 ; /* 0x00005c0007007a0c */
/* 0x000fda0000f26670 */
/*05e0*/ @!P1 LDG.E R29, [R14.64+0x4] ; /* 0x000004060e1d9981 */
/* 0x000ea2000c1e1900 */
/*05f0*/ IADD3 R16, R19, UR4, RZ ; /* 0x0000000413107c10 */
/* 0x000fc6000fffe0ff */
/*0600*/ @!P1 STS [R18], R29 ; /* 0x0000001d12009388 */
/* 0x0043e20000000800 */
/*0610*/ ISETP.GE.AND P1, PT, R16, c[0x0][0x164], PT ; /* 0x0000590010007a0c */
/* 0x000fc80003f26270 */
/*0620*/ ISETP.GE.OR P1, PT, R4, c[0x0][0x174], P1 ; /* 0x00005d0004007a0c */
/* 0x000fda0000f26670 */
/*0630*/ @!P1 IMAD.MOV.U32 R16, RZ, RZ, 0x4 ; /* 0x00000004ff109424 */
/* 0x000fc800078e00ff */
/*0640*/ @!P1 IMAD.WIDE R16, R25, R16, c[0x0][0x178] ; /* 0x00005e0019109625 */
/* 0x000fcc00078e0210 */
/*0650*/ @!P1 LDG.E R16, [R16.64] ; /* 0x0000000610109981 */
/* 0x000ea8000c1e1900 */
/*0660*/ @!P1 STS [R11], R16 ; /* 0x000000100b009388 */
/* 0x0043e80000000800 */
/*0670*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0680*/ @P0 IMAD R16, R28.reuse, 0x80, R8 ; /* 0x000000801c100824 */
/* 0x042fe400078e0208 */
/*0690*/ @P0 IMAD R29, R28.reuse, 0x80, R9 ; /* 0x000000801c1d0824 */
/* 0x040fe200078e0209 */
/*06a0*/ IADD3 R28, R28, 0x1, RZ ; /* 0x000000011c1c7810 */
/* 0x000fc60007ffe0ff */
/*06b0*/ @P0 LDS R16, [R16] ; /* 0x0000000010100984 */
/* 0x000fe80000000800 */
/*06c0*/ @P0 LDS R17, [R29+0x1000] ; /* 0x001000001d110984 */
/* 0x000e640000000800 */
/*06d0*/ @P0 IMAD.IADD R17, R17, 0x1, R16 ; /* 0x0000000111110824 */
/* 0x002fca00078e0210 */
/*06e0*/ @P0 IMNMX R6, R6, R17, PT ; /* 0x0000001106060217 */
/* 0x000fe40003800200 */
/*06f0*/ SHF.R.S32.HI R17, RZ, 0x1f, R28 ; /* 0x0000001fff117819 */
/* 0x000fc8000001141c */
/*0700*/ LEA.HI R17, R17, R28, RZ, 0x5 ; /* 0x0000001c11117211 */
/* 0x000fc800078f28ff */
/*0710*/ LOP3.LUT R17, R17, 0xffffffe0, RZ, 0xc0, !PT ; /* 0xffffffe011117812 */
/* 0x000fca00078ec0ff */
/*0720*/ IMAD.IADD R28, R28, 0x1, -R17 ; /* 0x000000011c1c7824 */
/* 0x000fca00078e0a11 */
/*0730*/ ISETP.NE.AND P1, PT, R28, RZ, PT ; /* 0x000000ff1c00720c */
/* 0x000fda0003f25270 */
/*0740*/ @P1 BRA 0x830 ; /* 0x000000e000001947 */
/* 0x000fea0003800000 */
/*0750*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0760*/ IADD3 R16, R23, -0x1, RZ ; /* 0xffffffff17107810 */
/* 0x000fc80007ffe0ff */
/*0770*/ ISETP.GE.AND P1, PT, R16, c[0x0][0x164], PT ; /* 0x0000590010007a0c */
/* 0x000fc80003f26270 */
/*0780*/ ISETP.GE.OR P1, PT, R7, c[0x0][0x170], P1 ; /* 0x00005c0007007a0c */
/* 0x000fda0000f26670 */
/*0790*/ @!P1 LDG.E R29, [R14.64+0x8] ; /* 0x000008060e1d9981 */
/* 0x000ea2000c1e1900 */
/*07a0*/ IADD3 R16, R21, -0x1, RZ ; /* 0xffffffff15107810 */
/* 0x000fc60007ffe0ff */
/*07b0*/ @!P1 STS [R18], R29 ; /* 0x0000001d12009388 */
/* 0x0043e20000000800 */
/*07c0*/ ISETP.GE.AND P1, PT, R16, c[0x0][0x164], PT ; /* 0x0000590010007a0c */
/* 0x000fc80003f26270 */
/*07d0*/ ISETP.GE.OR P1, PT, R4, c[0x0][0x174], P1 ; /* 0x00005d0004007a0c */
/* 0x000fda0000f26670 */
/*07e0*/ @!P1 IMAD.MOV.U32 R17, RZ, RZ, 0x4 ; /* 0x00000004ff119424 */
/* 0x000fc800078e00ff */
/*07f0*/ @!P1 IMAD.WIDE R16, R24, R17, c[0x0][0x178] ; /* 0x00005e0018109625 */
/* 0x000fcc00078e0211 */
/*0800*/ @!P1 LDG.E R16, [R16.64] ; /* 0x0000000610109981 */
/* 0x000ea8000c1e1900 */
/*0810*/ @!P1 STS [R11], R16 ; /* 0x000000100b009388 */
/* 0x0043e80000000800 */
/*0820*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0830*/ @P0 IMAD R16, R28.reuse, 0x80, R8 ; /* 0x000000801c100824 */
/* 0x042fe400078e0208 */
/*0840*/ @P0 IMAD R29, R28.reuse, 0x80, R9 ; /* 0x000000801c1d0824 */
/* 0x040fe200078e0209 */
/*0850*/ IADD3 R28, R28, 0x1, RZ ; /* 0x000000011c1c7810 */
/* 0x000fc60007ffe0ff */
/*0860*/ @P0 LDS R16, [R16] ; /* 0x0000000010100984 */
/* 0x000fe80000000800 */
/*0870*/ @P0 LDS R17, [R29+0x1000] ; /* 0x001000001d110984 */
/* 0x000e640000000800 */
/*0880*/ @P0 IMAD.IADD R17, R17, 0x1, R16 ; /* 0x0000000111110824 */
/* 0x002fca00078e0210 */
/*0890*/ @P0 IMNMX R6, R6, R17, PT ; /* 0x0000001106060217 */
/* 0x000fe40003800200 */
/*08a0*/ SHF.R.S32.HI R17, RZ, 0x1f, R28 ; /* 0x0000001fff117819 */
/* 0x000fc8000001141c */
/*08b0*/ LEA.HI R17, R17, R28, RZ, 0x5 ; /* 0x0000001c11117211 */
/* 0x000fc800078f28ff */
/*08c0*/ LOP3.LUT R17, R17, 0xffffffe0, RZ, 0xc0, !PT ; /* 0xffffffe011117812 */
/* 0x000fca00078ec0ff */
/*08d0*/ IMAD.IADD R28, R28, 0x1, -R17 ; /* 0x000000011c1c7824 */
/* 0x000fca00078e0a11 */
/*08e0*/ ISETP.NE.AND P1, PT, R28, RZ, PT ; /* 0x000000ff1c00720c */
/* 0x000fda0003f25270 */
/*08f0*/ @P1 BRA 0x9c0 ; /* 0x000000c000001947 */
/* 0x000fea0003800000 */
/*0900*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0910*/ ISETP.GE.AND P2, PT, R21, c[0x0][0x164], PT ; /* 0x0000590015007a0c */
/* 0x000fe40003f46270 */
/*0920*/ ISETP.GE.AND P1, PT, R23, c[0x0][0x164], PT ; /* 0x0000590017007a0c */
/* 0x000fe40003f26270 */
/*0930*/ ISETP.GE.OR P2, PT, R4, c[0x0][0x174], P2 ; /* 0x00005d0004007a0c */
/* 0x000fe40001746670 */
/*0940*/ ISETP.GE.OR P1, PT, R7, c[0x0][0x170], P1 ; /* 0x00005c0007007a0c */
/* 0x000fd60000f26670 */
/*0950*/ @!P2 IMAD.MOV.U32 R17, RZ, RZ, 0x4 ; /* 0x00000004ff11a424 */
/* 0x000fc800078e00ff */
/*0960*/ @!P2 IMAD.WIDE R16, R26, R17, c[0x0][0x178] ; /* 0x00005e001a10a625 */
/* 0x000fe200078e0211 */
/*0970*/ @!P1 LDG.E R29, [R14.64+0xc] ; /* 0x00000c060e1d9981 */
/* 0x000eaa000c1e1900 */
/*0980*/ @!P2 LDG.E R16, [R16.64] ; /* 0x000000061010a981 */
/* 0x000ee8000c1e1900 */
/*0990*/ @!P1 STS [R18], R29 ; /* 0x0000001d12009388 */
/* 0x0043e80000000800 */
/*09a0*/ @!P2 STS [R11], R16 ; /* 0x000000100b00a388 */
/* 0x0083e80000000800 */
/*09b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*09c0*/ @P0 IMAD R16, R28.reuse, 0x80, R8 ; /* 0x000000801c100824 */
/* 0x042fe200078e0208 */
/*09d0*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fe2000fffe03f */
/*09e0*/ @P0 IMAD R29, R28.reuse, 0x80, R9 ; /* 0x000000801c1d0824 */
/* 0x040fe200078e0209 */
/*09f0*/ IADD3 R28, R28, 0x1, RZ ; /* 0x000000011c1c7810 */
/* 0x000fc40007ffe0ff */
/*0a00*/ IADD3 R14, P1, R14, 0x10, RZ ; /* 0x000000100e0e7810 */
/* 0x000fe20007f3e0ff */
/*0a10*/ @P0 LDS R16, [R16] ; /* 0x0000000010100984 */
/* 0x000fe20000000800 */
/*0a20*/ IADD3 R21, R21, 0x4, RZ ; /* 0x0000000415157810 */
/* 0x000fe40007ffe0ff */
/*0a30*/ IADD3 R23, R23, 0x4, RZ ; /* 0x0000000417177810 */
/* 0x000fe20007ffe0ff */
/*0a40*/ @P0 LDS R17, [R29+0x1000] ; /* 0x001000001d110984 */
/* 0x000e640000000800 */
/*0a50*/ @P0 IMAD.IADD R17, R17, 0x1, R16 ; /* 0x0000000111110824 */
/* 0x002fca00078e0210 */
/*0a60*/ @P0 IMNMX R6, R6, R17, PT ; /* 0x0000001106060217 */
/* 0x000fe40003800200 */
/*0a70*/ SHF.R.S32.HI R17, RZ, 0x1f, R28 ; /* 0x0000001fff117819 */
/* 0x000fc8000001141c */
/*0a80*/ LEA.HI R17, R17, R28, RZ, 0x5 ; /* 0x0000001c11117211 */
/* 0x000fc800078f28ff */
/*0a90*/ LOP3.LUT R17, R17, 0xffffffe0, RZ, 0xc0, !PT ; /* 0xffffffe011117812 */
/* 0x000fca00078ec0ff */
/*0aa0*/ IMAD.IADD R28, R28, 0x1, -R17 ; /* 0x000000011c1c7824 */
/* 0x000fe400078e0a11 */
/*0ab0*/ IMAD.X R17, RZ, RZ, R15, P1 ; /* 0x000000ffff117224 */
/* 0x000fe200008e060f */
/*0ac0*/ IADD3 R15, R27, UR4, RZ ; /* 0x000000041b0f7c10 */
/* 0x000fc8000fffe0ff */
/*0ad0*/ ISETP.NE.AND P1, PT, R15, RZ, PT ; /* 0x000000ff0f00720c */
/* 0x000fe20003f25270 */
/*0ae0*/ IMAD.MOV.U32 R15, RZ, RZ, 0x4 ; /* 0x00000004ff0f7424 */
/* 0x000fc800078e00ff */
/*0af0*/ IMAD R26, R15.reuse, c[0x0][0x180], R26 ; /* 0x000060000f1a7a24 */
/* 0x040fe400078e021a */
/*0b00*/ IMAD R24, R15.reuse, c[0x0][0x180], R24 ; /* 0x000060000f187a24 */
/* 0x040fe400078e0218 */
/*0b10*/ IMAD R25, R15.reuse, c[0x0][0x180], R25 ; /* 0x000060000f197a24 */
/* 0x040fe400078e0219 */
/*0b20*/ IMAD R20, R15, c[0x0][0x180], R20 ; /* 0x000060000f147a24 */
/* 0x000fe400078e0214 */
/*0b30*/ @P1 BRA 0x3b0 ; /* 0xfffff87000001947 */
/* 0x000fea000383ffff */
/*0b40*/ ISETP.NE.AND P1, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fda0003f25270 */
/*0b50*/ @!P1 BRA 0xe20 ; /* 0x000002c000009947 */
/* 0x000fea0003800000 */
/*0b60*/ IADD3 R17, R5, c[0x0][0x168], RZ ; /* 0x00005a0005117a10 */
/* 0x000fe40007ffe0ff */
/*0b70*/ IADD3 R16, R3.reuse, UR4, RZ ; /* 0x0000000403107c10 */
/* 0x040fe4000fffe0ff */
/*0b80*/ IADD3 R3, R3, c[0x0][0x16c], RZ ; /* 0x00005b0003037a10 */
/* 0x000fe20007ffe0ff */
/*0b90*/ IMAD R17, R2, c[0x0][0x0], R17 ; /* 0x0000000002117a24 */
/* 0x000fe200078e0211 */
/*0ba0*/ IADD3 R5, R5, UR4, RZ ; /* 0x0000000405057c10 */
/* 0x000fc6000fffe0ff */
/*0bb0*/ IMAD R2, R17, c[0x0][0x180], R16 ; /* 0x0000600011027a24 */
/* 0x000fe400078e0210 */
/*0bc0*/ IMAD R0, R0, c[0x0][0x4], R3 ; /* 0x0000010000007a24 */
/* 0x000fe400078e0203 */
/*0bd0*/ IMAD.WIDE R2, R2, R15, c[0x0][0x178] ; /* 0x00005e0002027625 */
/* 0x000fc800078e020f */
/*0be0*/ IMAD R0, R5, c[0x0][0x180], R0 ; /* 0x0000600005007a24 */
/* 0x000fe400078e0200 */
/*0bf0*/ IMAD.MOV.U32 R14, RZ, RZ, R2 ; /* 0x000000ffff0e7224 */
/* 0x000fe400078e0002 */
/*0c00*/ IMAD.MOV.U32 R15, RZ, RZ, R3 ; /* 0x000000ffff0f7224 */
/* 0x000fe400078e0003 */
/*0c10*/ ISETP.NE.AND P1, PT, R28.reuse, RZ, PT ; /* 0x000000ff1c00720c */
/* 0x040fe20003f25270 */
/*0c20*/ @P0 IMAD R19, R28.reuse, 0x80, R8 ; /* 0x000000801c130824 */
/* 0x040fe400078e0208 */
/*0c30*/ @P0 IMAD R20, R28, 0x80, R9 ; /* 0x000000801c140824 */
/* 0x000fd400078e0209 */
/*0c40*/ @P1 BRA 0xd10 ; /* 0x000000c000001947 */
/* 0x000fea0003800000 */
/*0c50*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0c60*/ ISETP.GE.AND P1, PT, R5, c[0x0][0x164], PT ; /* 0x0000590005007a0c */
/* 0x000fe40003f26270 */
/*0c70*/ ISETP.GE.AND P2, PT, R16, c[0x0][0x164], PT ; /* 0x0000590010007a0c */
/* 0x000fe40003f46270 */
/*0c80*/ ISETP.GE.OR P1, PT, R4, c[0x0][0x174], P1 ; /* 0x00005d0004007a0c */
/* 0x000fe40000f26670 */
/*0c90*/ ISETP.GE.OR P2, PT, R7, c[0x0][0x170], P2 ; /* 0x00005c0007007a0c */
/* 0x000fd60001746670 */
/*0ca0*/ @!P1 IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff039424 */
/* 0x000fc800078e00ff */
/*0cb0*/ @!P1 IMAD.WIDE R2, R0, R3, c[0x0][0x178] ; /* 0x00005e0000029625 */
/* 0x000fe200078e0203 */
/*0cc0*/ @!P2 LDG.E R17, [R14.64] ; /* 0x000000060e11a981 */
/* 0x000eaa000c1e1900 */
/*0cd0*/ @!P1 LDG.E R2, [R2.64] ; /* 0x0000000602029981 */
/* 0x000ee8000c1e1900 */
/*0ce0*/ @!P2 STS [R18], R17 ; /* 0x000000111200a388 */
/* 0x0043e80000000800 */
/*0cf0*/ @!P1 STS [R11], R2 ; /* 0x000000020b009388 */
/* 0x0083e80000000800 */
/*0d00*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0d10*/ @P0 LDS R19, [R19] ; /* 0x0000000013130984 */
/* 0x000fe20000000800 */
/*0d20*/ IADD3 R10, R10, -0x1, RZ ; /* 0xffffffff0a0a7810 */
/* 0x000fc40007ffe0ff */
/*0d30*/ IADD3 R28, R28, 0x1, RZ ; /* 0x000000011c1c7810 */
/* 0x000fe20007ffe0ff */
/*0d40*/ @P0 LDS R2, [R20+0x1000] ; /* 0x0010000014020984 */
/* 0x002e620000000800 */
/*0d50*/ ISETP.NE.AND P1, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fe40003f25270 */
/*0d60*/ SHF.R.S32.HI R3, RZ, 0x1f, R28 ; /* 0x0000001fff037819 */
/* 0x000fe4000001141c */
/*0d70*/ IADD3 R14, P2, R14, 0x4, RZ ; /* 0x000000040e0e7810 */
/* 0x000fe40007f5e0ff */
/*0d80*/ LEA.HI R3, R3, R28, RZ, 0x5 ; /* 0x0000001c03037211 */
/* 0x000fe400078f28ff */
/*0d90*/ IADD3 R0, R0, c[0x0][0x180], RZ ; /* 0x0000600000007a10 */
/* 0x000fe20007ffe0ff */
/*0da0*/ IMAD.X R15, RZ, RZ, R15, P2 ; /* 0x000000ffff0f7224 */
/* 0x000fe200010e060f */
/*0db0*/ LOP3.LUT R17, R3, 0xffffffe0, RZ, 0xc0, !PT ; /* 0xffffffe003117812 */
/* 0x000fc400078ec0ff */
/*0dc0*/ IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105057810 */
/* 0x000fe40007ffe0ff */
/*0dd0*/ IADD3 R16, R16, 0x1, RZ ; /* 0x0000000110107810 */
/* 0x000fe20007ffe0ff */
/*0de0*/ IMAD.IADD R28, R28, 0x1, -R17 ; /* 0x000000011c1c7824 */
/* 0x000fe400078e0a11 */
/*0df0*/ @P0 IMAD.IADD R3, R2, 0x1, R19 ; /* 0x0000000102030824 */
/* 0x002fca00078e0213 */
/*0e00*/ @P0 IMNMX R6, R6, R3, PT ; /* 0x0000000306060217 */
/* 0x020fe20003800200 */
/*0e10*/ @P1 BRA 0xc10 ; /* 0xfffffdf000001947 */
/* 0x000fea000383ffff */
/*0e20*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0e30*/ STG.E [R12.64], R6 ; /* 0x000000060c007986 */
/* 0x020fe2000c101906 */
/*0e40*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0e50*/ BRA 0xe50; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0e60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ea0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0eb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ec0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ed0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ee0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ef0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z6UpdateiiiiiPii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002600 */
/*0020*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R2, R2, c[0x0][0x4], R5 ; /* 0x0000010002027a24 */
/* 0x001fc400078e0205 */
/*0060*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x002fc600078e0203 */
/*0070*/ IADD3 R3, R2, c[0x0][0x168], RZ ; /* 0x00005a0002037a10 */
/* 0x000fe40007ffe0ff */
/*0080*/ IADD3 R0, R0, c[0x0][0x164], RZ ; /* 0x0000590000007a10 */
/* 0x000fe40007ffe0ff */
/*0090*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x170], PT ; /* 0x00005c0003007a0c */
/* 0x000fc80003f06270 */
/*00a0*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x16c], P0 ; /* 0x00005b0000007a0c */
/* 0x000fda0000706670 */
/*00b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00c0*/ MOV R4, c[0x0][0x180] ; /* 0x0000600000047a02 */
/* 0x000fe20000000f00 */
/*00d0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*00e0*/ IMAD R6, R0.reuse, c[0x0][0x180], R3.reuse ; /* 0x0000600000067a24 */
/* 0x140fe200078e0203 */
/*00f0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0100*/ IMAD R2, R0, R4, c[0x0][0x160] ; /* 0x0000580000027624 */
/* 0x000fe400078e0204 */
/*0110*/ IMAD R4, R4, c[0x0][0x160], R3 ; /* 0x0000580004047a24 */
/* 0x000fc800078e0203 */
/*0120*/ IMAD.WIDE R2, R2, R7, c[0x0][0x178] ; /* 0x00005e0002027625 */
/* 0x000fc800078e0207 */
/*0130*/ IMAD.WIDE R4, R4, R7.reuse, c[0x0][0x178] ; /* 0x00005e0004047625 */
/* 0x080fe400078e0207 */
/*0140*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea4000c1e1900 */
/*0150*/ IMAD.WIDE R6, R6, R7, c[0x0][0x178] ; /* 0x00005e0006067625 */
/* 0x000fe400078e0207 */
/*0160*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0170*/ LDG.E R0, [R6.64] ; /* 0x0000000406007981 */
/* 0x000ee2000c1e1900 */
/*0180*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fc80007ffe0ff */
/*0190*/ ISETP.GE.AND P0, PT, R9, R0, PT ; /* 0x000000000900720c */
/* 0x008fda0003f06270 */
/*01a0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*01b0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*01c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01d0*/ BRA 0x1d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <assert.h>
#include <vector>
using namespace std;
const int INF = 10000000;
const int V = 10010;
const int MAX_THREAD_DIM2 = 32;
void input(char *inFileName, int B);
void output(char *outFileName);
void block_FW_2GPU(int B);
int ceil(int a, int b);
void calAsync(int gpuId, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height);
int realn;
int n, m; // Number of vertices, edges
int* Dist; // n * n, on host
int* dDist[2]; // n * n, on device
int streamSize[2];
vector<cudaStream_t> streams[2];
// Return the device index currently selected on this host thread
// (wrapper around cudaGetDevice).
int getGPUId ()
{
	int gpuId;
	cudaGetDevice(&gpuId);
	return gpuId;
}
// Hand out a stream on `gpuId`.  streams[gpuId] is a grow-only pool and
// streamSize[gpuId] counts entries handed out this phase; syncAllStreams()
// resets the counters so pooled streams get reused.
cudaStream_t getIdleStream (int gpuId)
{
	cudaSetDevice(gpuId);
	// Pool exhausted: create a new stream and remember it.
	if(streams[gpuId].size() == streamSize[gpuId])
	{
		cudaStream_t stm;
		cudaStreamCreate(&stm);
		streams[gpuId].push_back(stm);
		streamSize[gpuId]++;
		return stm;
	}
	else
		// Reuse an already-created (currently idle) stream.
		return streams[gpuId][streamSize[gpuId]++];
}
void syncAllStreams ()
{
    // Phase barrier: wait for every kernel queued on BOTH devices, then
    // mark all pooled streams idle again (streamSize is the busy count).
    //
    // BUG FIX: the original called cudaThreadSynchronize(), which is
    // deprecated and only blocks on the *current* device — work launched
    // on the other GPU could still be running past the phase barrier.
    int current = 0;
    cudaGetDevice(&current);
    for (int gpu = 0; gpu < 2; ++gpu) {
        cudaSetDevice(gpu);
        cudaDeviceSynchronize();
    }
    cudaSetDevice(current);     // restore the caller's active device
    streamSize[0] = 0;
    streamSize[1] = 0;
}
void blockCopyAsync (int gpuId, int* dst, const int* src, cudaMemcpyKind kind, cudaStream_t stream, int B, int bi0, int bi1, int bj0, int bj1)
{
    // Asynchronously copy the sub-matrix covering block rows [bi0, bi1)
    // and block columns [bj0, bj1), one matrix row per memcpy (rows of a
    // block-column strip are not contiguous in the n*n layout).
    cudaSetDevice(gpuId);
    const int rowBegin = bi0 * B;
    const int rowEnd   = bi1 * B;
    const int rowBytes = (bj1 - bj0) * B * sizeof(int);
    for (int row = rowBegin; row < rowEnd; ++row)
    {
        const int offset = row * n + bj0 * B;
        cudaMemcpyAsync(dst + offset, src + offset, rowBytes, kind, stream);
    }
}
int main(int argc, char* argv[])
{
    // Usage: <prog> <input-file> <output-file> <block-size>
    // BUG FIX: the original indexed argv[1..3] without checking argc and
    // accepted B <= 0, which would divide by zero inside ceil(realn, B).
    if (argc < 4) {
        fprintf(stderr, "usage: %s <input> <output> <blocksize>\n", argv[0]);
        return 1;
    }
    int B = atoi(argv[3]);
    if (B <= 0) {
        fprintf(stderr, "blocksize must be a positive integer\n");
        return 1;
    }
    input(argv[1], B);
    block_FW_2GPU(B);
    output(argv[2]);
    return 0;
}
void input(char *inFileName, int B)
{
    // Read "realn m" then m directed edges "a b v" (1-indexed) and build
    // the dense distance matrix in managed memory, padded so n is a
    // multiple of B. Missing edges get INF; the diagonal gets 0.
    // BUG FIXES: check fopen/fscanf results; close the file (original
    // leaked the FILE handle and ignored read failures).
    FILE *infile = fopen(inFileName, "r");
    if (infile == NULL) {
        fprintf(stderr, "cannot open input file: %s\n", inFileName);
        exit(EXIT_FAILURE);
    }
    if (fscanf(infile, "%d %d", &realn, &m) != 2) {
        fprintf(stderr, "malformed header in %s\n", inFileName);
        fclose(infile);
        exit(EXIT_FAILURE);
    }
    n = ceil(realn, B) * B;                    // pad to a whole number of blocks
    cudaMallocManaged(&Dist, n * n * sizeof(int));
    for (int i = 0, k = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j, ++k) {
            if (i == j) Dist[k] = 0;
            else Dist[k] = INF;
        }
    }
    while (--m >= 0) {
        int a, b, v;
        if (fscanf(infile, "%d %d %d", &a, &b, &v) != 3)
            break;                             // stop on a malformed edge line
        --a, --b;                              // input vertices are 1-indexed
        Dist[a * n + b] = v;
    }
    fclose(infile);
}
void output(char *outFileName)
{
    // Write the realn x realn result (ignoring padding rows/columns);
    // distances >= INF print as "INF". Frees the managed matrix when done.
    // BUG FIXES: check fopen; close the file (original leaked the handle).
    FILE *outfile = fopen(outFileName, "w");
    if (outfile == NULL) {
        fprintf(stderr, "cannot open output file: %s\n", outFileName);
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < realn; ++i) {
        for (int j = 0; j < realn; ++j) {
            int d = Dist[i * n + j];
            if (d >= INF) fprintf(outfile, "INF ");
            else fprintf(outfile, "%d ", d);
        }
        fprintf(outfile, "\n");
    }
    fclose(outfile);
    cudaFree(Dist);
}
void print ()
{
    // Debug helper: dump the realn x realn distance matrix to stderr,
    // printing "INF " for unreachable pairs, followed by a blank line.
    for (int row = 0; row < realn; ++row) {
        for (int col = 0; col < realn; ++col) {
            int d = Dist[row * n + col];
            if (d >= INF)
                fprintf(stderr, "INF ");
            else
                fprintf(stderr, "%d ", d);
        }
        fprintf(stderr, "\n");
    }
    fprintf(stderr, "\n");
}
int ceil(int a, int b)
{
    // Integer ceiling division: round a/b up to the next whole number
    // (intended for a >= 0, b > 0, as used by the block partitioning).
    int numerator = a + b - 1;
    return numerator / b;
}
void block_FW_2GPU(int B)
{
    // Blocked Floyd–Warshall over `round` pivot blocks, with the three
    // classic phases per round; phases 2 and 3 are split across two GPUs.
    // NOTE(review): both GPUs read/write the same managed Dist buffer —
    // presumably relying on unified-memory page migration between the
    // phase barriers; verify this is coherent on the target hardware.
    int round = ceil(n, B);
    for (int r = 0; r < round; ++r) {
        /* Phase 1*/
        // Pivot block (r,r) depends only on itself; must complete first.
        fprintf(stderr, "Round: %d\n", r);
        calAsync(0, B, r, r, r, 1, 1);
        syncAllStreams();
        /* Phase 2*/
        // Pivot row/column strips depend only on the pivot block, so the
        // four strips run concurrently, two per GPU.
        calAsync(0, B, r, r, 0, r, 1); // L 0
        calAsync(0, B, r, r, r +1, round - r -1, 1); // R 0
        calAsync(1, B, r, 0, r, 1, r); // U 1
        calAsync(1, B, r, r +1, r, 1, round - r -1); // D 1
        syncAllStreams();
        /* Phase 3*/
        // Remaining four quadrants depend only on phase-2 strips.
        calAsync(0, B, r, 0, 0, r, r); // <^
        calAsync(1, B, r, 0, r +1, round -r -1, r); // ^>
        calAsync(1, B, r, r +1, 0, r, round - r -1); // <v
        calAsync(0, B, r, r +1, r +1, round -r -1, round - r -1); // v>
        syncAllStreams();
    }
}
__global__
void Update (int k, int i0, int j0, int i1, int j1, int* dDist, int n)
{
#define D(i,j) (dDist[(i) * n + (j)])
    // One Floyd–Warshall relaxation step through pivot k for the element
    // rectangle [i0,i1) x [j0,j1); one thread per element.
    int row = blockDim.x * blockIdx.x + threadIdx.x + i0;
    int col = blockDim.y * blockIdx.y + threadIdx.y + j0;
    if (row >= i1 || col >= j1)
        return;                         // thread falls outside the rectangle
    int viaPivot = D(row, k) + D(k, col);
    if (viaPivot < D(row, col))
        D(row, col) = viaPivot;
}
__global__
void UpdateIndependent (int k0, int k1, int i0, int j0, int i1, int j1, int* dDist, int n)
{
#define D(i,j) (dDist[(i) * n + (j)])
    // Relax [i0,i1) x [j0,j1) through every pivot in [k0,k1) in one launch.
    // Valid only when the rectangle does not overlap the pivot rows/columns,
    // so D(i,k) and D(k,j) are read-only here and can be tiled into shared
    // memory, cacheSize pivots at a time.
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int di = blockDim.x * blockIdx.x + tx;
    int dj = blockDim.y * blockIdx.y + ty;
    int i = i0 + di;                    // global row handled by this thread
    int j = j0 + dj;                    // global column handled by this thread
    bool valid = i < i1 && j < j1;      // edge threads only help load tiles
    __shared__ int Si[MAX_THREAD_DIM2][MAX_THREAD_DIM2];   // Si[c][tx] caches D(i, k+c)
    __shared__ int Sj[MAX_THREAD_DIM2][MAX_THREAD_DIM2];   // Sj[c][ty] caches D(k+c, j)
    const int cacheSize = MAX_THREAD_DIM2;
    int Dij = valid? D(i, j): 0;        // running minimum kept in a register
    int dkmod = 0;                      // offset of k within the cached tile
    for(int k = k0; k < k1; ++k)
    {
        if(dkmod == 0)
        {
            // Refill both tiles for the next cacheSize pivots.
            // NOTE(review): the guards skip out-of-range loads when fewer
            // than cacheSize pivots remain — verify stale tile entries can
            // never be read in that case.
            __syncthreads();
            if(i < i1 && k+ty < k1)
                Si[ty][tx] = D(i, k+ty);
            if(j < j1 && k+tx < k1)
                Sj[tx][ty] = D(k+tx, j);
            __syncthreads();
        }
        if(valid)
        {
            // assert(Si[tx][dkmod] == D(i,k));
            // assert(Sj[dkmod][ty] == D(k,j));
            // int Dik = D(i, k);
            // int Dkj = D(k, j);
            int Dik = Si[dkmod][tx];    // cached D(i, k)
            int Dkj = Sj[dkmod][ty];    // cached D(k, j)
            int D1 = Dik + Dkj;
            if (D1 < Dij)
                Dij = D1;
        }
        dkmod = (dkmod + 1) % cacheSize;
    }
    if(valid)
        D(i, j) = Dij;                  // single write-back per thread
}
void calAsync(int gpuId, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height)
{
    // Asynchronously relax a rectangle of B x B blocks — top-left block
    // (block_start_x, block_start_y), block_height tall, block_width wide —
    // against pivot block `Round`, on device gpuId. Each (b_i, b_j) block
    // gets its own stream; the caller synchronizes between phases.
    cudaSetDevice(gpuId);
    int block_end_x = block_start_x + block_height;
    int block_end_y = block_start_y + block_width;
    for (int b_i = block_start_x; b_i < block_end_x; ++b_i) {
        for (int b_j = block_start_y; b_j < block_end_y; ++b_j) {
            // To calculate B*B elements in the block (b_i, b_j)
            // For each block, it need to compute B times
            // for (int k = Round * B; k < (Round +1) * B && k < n; ++k) {
            // To calculate original index of elements in the block (b_i, b_j)
            // For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2
            int i0 = b_i * B;               // element-index bounds of this block,
            int i1 = min((b_i +1) * B, n);  // clamped to the padded matrix size
            int j0 = b_j * B;
            int j1 = min((b_j +1) * B, n);
            int k0 = Round * B;             // pivot range handled this round
            int k1 = min((Round +1) * B, n);
            bool iDepends = i0 == k0;       // block shares its rows with the pivot block
            bool jDepends = j0 == k0;       // block shares its columns with the pivot block
            int threadDim = MAX_THREAD_DIM2;//std::min(B, MAX_THREAD_DIM2);
            int blockDim = (B + MAX_THREAD_DIM2 - 1) / MAX_THREAD_DIM2;
            dim3 grid(blockDim, blockDim), block(threadDim, threadDim);
            cudaStream_t stm = getIdleStream(gpuId);
            if(iDepends || jDepends)
            {
                // Dependent block: each pivot step reads results of the
                // previous one, so launch one kernel per k (ordered in-stream).
                for(int k=k0; k<k1; ++k)
                    Update<<<grid, block, 0, stm>>>(k, i0, j0, i1, j1, Dist, n);
            }
            else
                // Independent block: all pivots are read-only; one tiled kernel.
                UpdateIndependent<<<grid, block, 0, stm>>>(k0, k1, i0, j0, i1, j1, Dist, n);
            // for (int i = i0; i < i1; ++i) {
            //     for (int j = j0; j < j1; ++j) {
            //         if (Dist[i][k] + Dist[k][j] < Dist[i][j])
            //             Dist[i][j] = Dist[i][k] + Dist[k][j];
            //     }
            // }
            // }
        }
    }
} | .file "tmpxft_0014a179_00000000-6_HW4_x1054037_2GPU_Unified_bug.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4055:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type __tcf_0, @function
__tcf_0:
.LFB4770:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq 24+streams(%rip), %rdi
testq %rdi, %rdi
je .L4
movq 40+streams(%rip), %rsi
subq %rdi, %rsi
call _ZdlPvm@PLT
.L4:
movq streams(%rip), %rdi
testq %rdi, %rdi
je .L3
movq 16+streams(%rip), %rsi
subq %rdi, %rsi
call _ZdlPvm@PLT
.L3:
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4770:
.size __tcf_0, .-__tcf_0
.globl _Z8getGPUIdv
.type _Z8getGPUIdv, @function
_Z8getGPUIdv:
.LFB4042:
.cfi_startproc
endbr64
subq $24, %rsp
.cfi_def_cfa_offset 32
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
leaq 4(%rsp), %rdi
call cudaGetDevice@PLT
movl 4(%rsp), %eax
movq 8(%rsp), %rdx
subq %fs:40, %rdx
jne .L10
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L10:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4042:
.size _Z8getGPUIdv, .-_Z8getGPUIdv
	.globl _Z14syncAllStreamsv
	.type _Z14syncAllStreamsv, @function
_Z14syncAllStreamsv:                    # void syncAllStreams() — SysV AMD64, no args
.LFB4044:
	.cfi_startproc
	endbr64                         # CET/IBT landing pad
	subq $8, %rsp                   # realign %rsp to 16 bytes for the call
	.cfi_def_cfa_offset 16
	call cudaThreadSynchronize@PLT  # block until queued device work completes
	movl $0, streamSize(%rip)       # streamSize[0] = 0 (reset busy count)
	movl $0, 4+streamSize(%rip)     # streamSize[1] = 0
	addq $8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE4044:
	.size _Z14syncAllStreamsv, .-_Z14syncAllStreamsv
.globl _Z14blockCopyAsynciPiPKi14cudaMemcpyKindP11CUstream_stiiiii
.type _Z14blockCopyAsynciPiPKi14cudaMemcpyKindP11CUstream_stiiiii, @function
_Z14blockCopyAsynciPiPKi14cudaMemcpyKindP11CUstream_stiiiii:
.LFB4045:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rsi, %r12
movq %rdx, %r13
movl %ecx, 4(%rsp)
movq %r8, 8(%rsp)
movl %r9d, %ebp
call cudaSetDevice@PLT
movl %ebp, %ebx
imull 80(%rsp), %ebx
movl %ebp, %r14d
imull 88(%rsp), %r14d
cmpl %r14d, %ebx
jge .L13
movl %ebp, %r15d
imull 96(%rsp), %r15d
movl 104(%rsp), %eax
subl 96(%rsp), %eax
imull %eax, %ebp
sall $2, %ebp
movslq %ebp, %rbp
.L15:
movl %ebx, %edi
imull n(%rip), %edi
addl %r15d, %edi
movslq %edi, %rdi
salq $2, %rdi
leaq 0(%r13,%rdi), %rsi
addq %r12, %rdi
movq 8(%rsp), %r8
movl 4(%rsp), %ecx
movq %rbp, %rdx
call cudaMemcpyAsync@PLT
addl $1, %ebx
cmpl %r14d, %ebx
jne .L15
.L13:
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4045:
.size _Z14blockCopyAsynciPiPKi14cudaMemcpyKindP11CUstream_stiiiii, .-_Z14blockCopyAsynciPiPKi14cudaMemcpyKindP11CUstream_stiiiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "r"
.LC1:
.string "%d %d"
.LC2:
.string "%d %d %d"
.text
.globl _Z5inputPci
.type _Z5inputPci, @function
_Z5inputPci:
.LFB4047:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $40, %rsp
.cfi_def_cfa_offset 64
movl %esi, %ebp
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
call fopen@PLT
movq %rax, %rbx
leaq m(%rip), %rcx
leaq realn(%rip), %rdx
leaq .LC1(%rip), %rsi
movq %rax, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
movl %ebp, %eax
addl realn(%rip), %eax
subl $1, %eax
cltd
idivl %ebp
imull %ebp, %eax
movl %eax, n(%rip)
imull %eax, %eax
movslq %eax, %rsi
salq $2, %rsi
movl $1, %edx
leaq Dist(%rip), %rdi
call cudaMallocManaged@PLT
movl $0, %edi
movl $0, %ecx
cmpl $0, n(%rip)
jg .L19
.L20:
movl m(%rip), %eax
subl $1, %eax
movl %eax, m(%rip)
js .L18
leaq .LC2(%rip), %rbp
.L27:
leaq 16(%rsp), %rcx
leaq 12(%rsp), %rdx
leaq 20(%rsp), %r8
movq %rbp, %rsi
movq %rbx, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
movl 12(%rsp), %eax
subl $1, %eax
imull n(%rip), %eax
movl 16(%rsp), %edx
leal -1(%rax,%rdx), %eax
cltq
movl 20(%rsp), %ecx
movq Dist(%rip), %rdx
movl %ecx, (%rdx,%rax,4)
movl m(%rip), %eax
subl $1, %eax
movl %eax, m(%rip)
jns .L27
.L18:
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L35
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
movq Dist(%rip), %rsi
movl $10000000, (%rsi,%rdx)
.L22:
addl $1, %eax
addq $4, %rdx
cmpl %eax, n(%rip)
jle .L36
.L23:
cmpl %eax, %ecx
jne .L21
movq Dist(%rip), %rsi
movl $0, (%rsi,%rdx)
jmp .L22
.L36:
addl %eax, %edi
.L25:
addl $1, %ecx
cmpl %ecx, n(%rip)
jle .L20
.L19:
movslq %edi, %rdx
salq $2, %rdx
movl $0, %eax
cmpl $0, n(%rip)
jg .L23
jmp .L25
.L35:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4047:
.size _Z5inputPci, .-_Z5inputPci
.section .rodata.str1.1
.LC3:
.string "w"
.LC4:
.string "INF "
.LC5:
.string "%d "
.LC6:
.string "\n"
.text
.globl _Z6outputPc
.type _Z6outputPc, @function
_Z6outputPc:
.LFB4048:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
leaq .LC3(%rip), %rsi
call fopen@PLT
movq %rax, %r12
movl $0, %ebp
leaq .LC5(%rip), %r14
leaq .LC4(%rip), %r13
leaq .LC6(%rip), %r15
cmpl $0, realn(%rip)
jg .L38
.L39:
movq Dist(%rip), %rdi
call cudaFree@PLT
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L40:
.cfi_restore_state
movq %r14, %rdx
movl $2, %esi
movq %r12, %rdi
movl $0, %eax
call __fprintf_chk@PLT
.L41:
addl $1, %ebx
cmpl %ebx, realn(%rip)
jle .L43
.L42:
movl %ebp, %eax
imull n(%rip), %eax
addl %ebx, %eax
cltq
movq Dist(%rip), %rdx
movl (%rdx,%rax,4), %ecx
cmpl $9999999, %ecx
jle .L40
movq %r13, %rdx
movl $2, %esi
movq %r12, %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L41
.L43:
movq %r15, %rdx
movl $2, %esi
movq %r12, %rdi
movl $0, %eax
call __fprintf_chk@PLT
addl $1, %ebp
cmpl %ebp, realn(%rip)
jle .L39
.L38:
movl $0, %ebx
cmpl $0, realn(%rip)
jg .L42
jmp .L43
.cfi_endproc
.LFE4048:
.size _Z6outputPc, .-_Z6outputPc
.globl _Z5printv
.type _Z5printv, @function
_Z5printv:
.LFB4049:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movl $0, %ebp
leaq .LC5(%rip), %r13
leaq .LC4(%rip), %r12
leaq .LC6(%rip), %r14
cmpl $0, realn(%rip)
jg .L51
.L52:
leaq .LC6(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L53:
.cfi_restore_state
movq %r13, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
.L54:
addl $1, %ebx
cmpl %ebx, realn(%rip)
jle .L56
.L55:
movl %ebp, %eax
imull n(%rip), %eax
addl %ebx, %eax
cltq
movq Dist(%rip), %rdx
movl (%rdx,%rax,4), %ecx
cmpl $9999999, %ecx
jle .L53
movq %r12, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L54
.L56:
movq %r14, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
addl $1, %ebp
cmpl %ebp, realn(%rip)
jle .L52
.L51:
movl $0, %ebx
cmpl $0, realn(%rip)
jg .L55
jmp .L56
.cfi_endproc
.LFE4049:
.size _Z5printv, .-_Z5printv
	.globl _Z4ceilii
	.type _Z4ceilii, @function
_Z4ceilii:                              # int ceil(int a, int b): a=%edi, b=%esi (SysV)
.LFB4050:
	.cfi_startproc
	endbr64                         # CET/IBT landing pad
	leal -1(%rsi,%rdi), %eax        # eax = a + b - 1 (flag-free add via lea)
	cltd                            # sign-extend eax into edx:eax for idiv
	idivl %esi                      # eax = (a + b - 1) / b  (ceiling division)
	ret                             # quotient already in return register %eax
	.cfi_endproc
.LFE4050:
	.size _Z4ceilii, .-_Z4ceilii
.globl _Z31__device_stub__Z6UpdateiiiiiPiiiiiiiPii
.type _Z31__device_stub__Z6UpdateiiiiiPiiiiiiiPii, @function
_Z31__device_stub__Z6UpdateiiiiiPiiiiiiiPii:
.LFB4077:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %r9, (%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
leaq 176(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L68
.L64:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L69
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L68:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 184
pushq 40(%rsp)
.cfi_def_cfa_offset 192
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6UpdateiiiiiPii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L64
.L69:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4077:
.size _Z31__device_stub__Z6UpdateiiiiiPiiiiiiiPii, .-_Z31__device_stub__Z6UpdateiiiiiPiiiiiiiPii
.globl _Z6UpdateiiiiiPii
.type _Z6UpdateiiiiiPii, @function
_Z6UpdateiiiiiPii:
.LFB4078:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z31__device_stub__Z6UpdateiiiiiPiiiiiiiPii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4078:
.size _Z6UpdateiiiiiPii, .-_Z6UpdateiiiiiPii
.globl _Z44__device_stub__Z17UpdateIndependentiiiiiiPiiiiiiiiPii
.type _Z44__device_stub__Z17UpdateIndependentiiiiiiPiiiiiiiiPii, @function
_Z44__device_stub__Z17UpdateIndependentiiiiiiPiiiiiiiiPii:
.LFB4079:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq 192(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
movq %rsp, %rax
movq %rax, 144(%rsp)
leaq 200(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L76
.L72:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L77
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L76:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 200
pushq 40(%rsp)
.cfi_def_cfa_offset 208
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z17UpdateIndependentiiiiiiPii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L72
.L77:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4079:
.size _Z44__device_stub__Z17UpdateIndependentiiiiiiPiiiiiiiiPii, .-_Z44__device_stub__Z17UpdateIndependentiiiiiiPiiiiiiiiPii
.globl _Z17UpdateIndependentiiiiiiPii
.type _Z17UpdateIndependentiiiiiiPii, @function
_Z17UpdateIndependentiiiiiiPii:
.LFB4080:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
pushq 24(%rsp)
.cfi_def_cfa_offset 32
call _Z44__device_stub__Z17UpdateIndependentiiiiiiPiiiiiiiiPii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4080:
.size _Z17UpdateIndependentiiiiiiPii, .-_Z17UpdateIndependentiiiiiiPii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC7:
.string "_Z17UpdateIndependentiiiiiiPii"
.section .rodata.str1.1
.LC8:
.string "_Z6UpdateiiiiiPii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4082:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z17UpdateIndependentiiiiiiPii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z6UpdateiiiiiPii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4082:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .rodata._ZNSt6vectorIP11CUstream_stSaIS1_EE17_M_realloc_insertIJRKS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_.str1.1,"aMS",@progbits,1
.LC9:
.string "vector::_M_realloc_insert"
.section .text._ZNSt6vectorIP11CUstream_stSaIS1_EE17_M_realloc_insertIJRKS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_,"axG",@progbits,_ZNSt6vectorIP11CUstream_stSaIS1_EE17_M_realloc_insertIJRKS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_,comdat
.align 2
.weak _ZNSt6vectorIP11CUstream_stSaIS1_EE17_M_realloc_insertIJRKS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_
.type _ZNSt6vectorIP11CUstream_stSaIS1_EE17_M_realloc_insertIJRKS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_, @function
_ZNSt6vectorIP11CUstream_stSaIS1_EE17_M_realloc_insertIJRKS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_:
.LFB4553:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rsi, (%rsp)
movq %rdx, 8(%rsp)
movq 8(%rdi), %rbp
movq (%rdi), %r13
movq %rbp, %rax
subq %r13, %rax
sarq $3, %rax
movabsq $1152921504606846975, %rdx
cmpq %rdx, %rax
je .L99
movq %rdi, %rbx
cmpq %r13, %rbp
movl $1, %edx
cmovne %rax, %rdx
addq %rdx, %rax
jc .L85
movabsq $1152921504606846975, %r14
cmpq %r14, %rax
cmovbe %rax, %r14
movq (%rsp), %r15
subq %r13, %r15
movl $0, %r12d
testq %rax, %rax
je .L86
jmp .L93
.L99:
leaq .LC9(%rip), %rdi
call _ZSt20__throw_length_errorPKc@PLT
.L100:
movq %r15, %rdx
movq %r13, %rsi
movq %r12, %rdi
call memmove@PLT
leaq 8(%r12,%r15), %r15
movq (%rsp), %rax
subq %rax, %rbp
testq %rbp, %rbp
jg .L88
addq %rbp, %r15
movq 16(%rbx), %rsi
subq %r13, %rsi
jmp .L92
.L85:
movq (%rsp), %r15
subq %r13, %r15
movabsq $1152921504606846975, %r14
.L93:
leaq 0(,%r14,8), %rdi
call _Znwm@PLT
movq %rax, %r12
.L86:
movq 8(%rsp), %rax
movq (%rax), %rax
movq %rax, (%r12,%r15)
testq %r15, %r15
jg .L100
leaq 8(%r12,%r15), %r15
movq (%rsp), %rax
subq %rax, %rbp
testq %rbp, %rbp
jle .L90
.L88:
movq %rbp, %rdx
movq (%rsp), %rsi
movq %r15, %rdi
call memcpy@PLT
.L90:
addq %rbp, %r15
testq %r13, %r13
je .L91
movq 16(%rbx), %rsi
subq %r13, %rsi
.L92:
movq %r13, %rdi
call _ZdlPvm@PLT
.L91:
movq %r12, (%rbx)
movq %r15, 8(%rbx)
leaq (%r12,%r14,8), %rax
movq %rax, 16(%rbx)
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4553:
.size _ZNSt6vectorIP11CUstream_stSaIS1_EE17_M_realloc_insertIJRKS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_, .-_ZNSt6vectorIP11CUstream_stSaIS1_EE17_M_realloc_insertIJRKS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_
.text
.globl _Z13getIdleStreami
.type _Z13getIdleStreami, @function
_Z13getIdleStreami:
.LFB4043:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $16, %rsp
.cfi_def_cfa_offset 32
movl %edi, %ebx
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
call cudaSetDevice@PLT
leaq streams(%rip), %rsi
movslq %ebx, %rdx
leaq (%rdx,%rdx), %rax
leaq (%rax,%rdx), %rcx
movq (%rsi,%rcx,8), %rdi
leaq streamSize(%rip), %rcx
movl (%rcx,%rdx,4), %ecx
movslq %ecx, %r8
addq %rdx, %rax
movq 8(%rsi,%rax,8), %rax
subq %rdi, %rax
sarq $3, %rax
cmpq %rax, %r8
je .L108
movslq %ebx, %rbx
addl $1, %ecx
leaq streamSize(%rip), %rax
movl %ecx, (%rax,%rbx,4)
movq (%rdi,%r8,8), %rax
.L101:
movq 8(%rsp), %rdx
subq %fs:40, %rdx
jne .L109
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L108:
.cfi_restore_state
movq %rsp, %rdi
call cudaStreamCreate@PLT
leaq streams(%rip), %rcx
movslq %ebx, %rdx
leaq (%rdx,%rdx), %rax
leaq (%rax,%rdx), %rsi
movq 8(%rcx,%rsi,8), %rsi
addq %rdx, %rax
cmpq 16(%rcx,%rax,8), %rsi
je .L103
movq (%rsp), %rax
movq %rax, (%rsi)
leaq (%rdx,%rdx), %rax
leaq (%rax,%rdx), %rsi
movq 8(%rcx,%rsi,8), %rax
addq $8, %rax
movq %rax, 8(%rcx,%rsi,8)
.L104:
leaq streamSize(%rip), %rax
movslq %ebx, %rbx
addl $1, (%rax,%rbx,4)
movq (%rsp), %rax
jmp .L101
.L103:
movq %rsp, %rdx
movslq %ebx, %rax
leaq (%rax,%rax,2), %rcx
leaq streams(%rip), %rax
leaq (%rax,%rcx,8), %rdi
call _ZNSt6vectorIP11CUstream_stSaIS1_EE17_M_realloc_insertIJRKS1_EEEvN9__gnu_cxx17__normal_iteratorIPS1_S3_EEDpOT_
jmp .L104
.L109:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4043:
.size _Z13getIdleStreami, .-_Z13getIdleStreami
.globl _Z8calAsynciiiiiii
.type _Z8calAsynciiiiiii, @function
_Z8calAsynciiiiiii:
.LFB4052:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movl %edi, 36(%rsp)
movl %esi, %r15d
movl %esi, 16(%rsp)
movl %edx, %ebx
movl %ecx, %ebp
movl %r8d, %r13d
movl %r9d, %r12d
call cudaSetDevice@PLT
movl %ebp, %edx
addl 160(%rsp), %edx
leal 0(%r13,%r12), %eax
movl %eax, 20(%rsp)
cmpl %edx, %ebp
jge .L110
movl %r15d, %edi
imull %ebx, %edi
movl %edi, 8(%rsp)
addl $1, %ebx
movl %r15d, %edi
imull %r15d, %ebx
movl %ebx, 32(%rsp)
leal 62(%r15), %eax
movl %r15d, %ecx
addl $31, %ecx
cmovns %ecx, %eax
sarl $5, %eax
movl %eax, 40(%rsp)
movl %r15d, %eax
imull %ebp, %eax
imull %r13d, %edi
movl %edx, 56(%rsp)
movl %eax, %edx
movl %edi, 60(%rsp)
movl %ebp, %eax
movl %r13d, 44(%rsp)
jmp .L112
.L121:
movl 8(%rsp), %ebp
cmpl %ebx, %ebp
jl .L117
.L115:
movl 20(%rsp), %eax
cmpl %eax, %r15d
je .L125
.L118:
movl n(%rip), %ebx
movl 24(%rsp), %eax
cmpl %eax, %ebx
movl %eax, %r14d
cmovle %ebx, %r14d
movl %r13d, %esi
movl %r13d, (%rsp)
addl $1, %r15d
movl 16(%rsp), %eax
addl %eax, %r13d
cmpl %ebx, %r13d
movl %ebx, %eax
cmovle %r13d, %eax
movl %eax, 4(%rsp)
movl 32(%rsp), %eax
cmpl %eax, %ebx
cmovg %eax, %ebx
movl 8(%rsp), %eax
cmpl %eax, %esi
sete %bpl
movl 40(%rsp), %eax
movl %eax, 72(%rsp)
movl %eax, 76(%rsp)
movl $1, 80(%rsp)
movl $32, 84(%rsp)
movl $32, 88(%rsp)
movl $1, 92(%rsp)
movl 36(%rsp), %edi
call _Z13getIdleStreami
movq %rax, %r12
cmpb $0, 31(%rsp)
jne .L121
testb %bpl, %bpl
jne .L121
movl 92(%rsp), %ecx
movq %rax, %r9
movl $0, %r8d
movq 84(%rsp), %rdx
movq 72(%rsp), %rdi
movl 80(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L115
movl n(%rip), %eax
pushq %rax
.cfi_def_cfa_offset 168
pushq Dist(%rip)
.cfi_def_cfa_offset 176
movl 20(%rsp), %r9d
movl %r14d, %r8d
movl 16(%rsp), %ecx
movl 28(%rsp), %edx
movl %ebx, %esi
movl 24(%rsp), %edi
call _Z44__device_stub__Z17UpdateIndependentiiiiiiPiiiiiiiiPii
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L115
.L116:
addl $1, %ebp
cmpl %ebp, %ebx
je .L115
.L117:
movl 92(%rsp), %ecx
movq %r12, %r9
movl $0, %r8d
movq 84(%rsp), %rdx
movq 72(%rsp), %rdi
movl 80(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L116
subq $8, %rsp
.cfi_def_cfa_offset 168
movl n(%rip), %eax
pushq %rax
.cfi_def_cfa_offset 176
movq Dist(%rip), %r9
movl 20(%rsp), %r8d
movl %r14d, %ecx
movl 16(%rsp), %edx
movl 28(%rsp), %esi
movl %ebp, %edi
call _Z31__device_stub__Z6UpdateiiiiiPiiiiiiiPii
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L116
.L125:
movl 48(%rsp), %edx
movl 52(%rsp), %eax
.L120:
addl $1, %eax
movl 16(%rsp), %edi
addl %edi, %edx
cmpl %eax, 56(%rsp)
je .L110
.L112:
movl 20(%rsp), %esi
cmpl %esi, 44(%rsp)
jge .L120
movl %edx, 12(%rsp)
movl 16(%rsp), %edi
addl %edx, %edi
movl %edi, 24(%rsp)
movl 8(%rsp), %edi
cmpl %edi, %edx
sete 31(%rsp)
movl 60(%rsp), %r13d
movl 44(%rsp), %r15d
movl %edx, 48(%rsp)
movl %eax, 52(%rsp)
jmp .L118
.L110:
addq $104, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4052:
.size _Z8calAsynciiiiiii, .-_Z8calAsynciiiiiii
.section .rodata.str1.1
.LC10:
.string "Round: %d\n"
.text
.globl _Z13block_FW_2GPUi
.type _Z13block_FW_2GPUi, @function
_Z13block_FW_2GPUi:
.LFB4051:
.cfi_startproc
endbr64
movl %edi, %eax
addl n(%rip), %eax
subl $1, %eax
cltd
idivl %edi
testl %eax, %eax
jle .L131
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movl %edi, %r12d
movl %eax, %r14d
movl $0, %ebx
leaq .LC10(%rip), %r15
.L128:
movl %ebx, %ecx
movq %r15, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
subq $8, %rsp
.cfi_def_cfa_offset 72
pushq $1
.cfi_def_cfa_offset 80
movl $1, %r9d
movl %ebx, %r8d
movl %ebx, %ecx
movl %ebx, %edx
movl %r12d, %esi
movl $0, %edi
call _Z8calAsynciiiiiii
addq $16, %rsp
.cfi_def_cfa_offset 64
call _Z14syncAllStreamsv
subq $8, %rsp
.cfi_def_cfa_offset 72
pushq $1
.cfi_def_cfa_offset 80
movl %ebx, %r9d
movl $0, %r8d
movl %ebx, %ecx
movl %ebx, %edx
movl %r12d, %esi
movl $0, %edi
call _Z8calAsynciiiiiii
leal -1(%r14), %r13d
subl %ebx, %r13d
movl %ebx, %ebp
addl $1, %ebx
movl $1, (%rsp)
movl %r13d, %r9d
movl %ebx, %r8d
movl %ebp, %ecx
movl %ebp, %edx
movl %r12d, %esi
movl $0, %edi
call _Z8calAsynciiiiiii
movl %ebp, (%rsp)
movl $1, %r9d
movl %ebp, %r8d
movl $0, %ecx
movl %ebp, %edx
movl %r12d, %esi
movl $1, %edi
call _Z8calAsynciiiiiii
movl %r13d, (%rsp)
movl $1, %r9d
movl %ebp, %r8d
movl %ebx, %ecx
movl %ebp, %edx
movl %r12d, %esi
movl $1, %edi
call _Z8calAsynciiiiiii
addq $16, %rsp
.cfi_def_cfa_offset 64
call _Z14syncAllStreamsv
subq $8, %rsp
.cfi_def_cfa_offset 72
pushq %rbp
.cfi_def_cfa_offset 80
movl %ebp, %r9d
movl $0, %r8d
movl $0, %ecx
movl %ebp, %edx
movl %r12d, %esi
movl $0, %edi
call _Z8calAsynciiiiiii
movl %ebp, (%rsp)
movl %r13d, %r9d
movl %ebx, %r8d
movl $0, %ecx
movl %ebp, %edx
movl %r12d, %esi
movl $1, %edi
call _Z8calAsynciiiiiii
movl %r13d, (%rsp)
movl %ebp, %r9d
movl $0, %r8d
movl %ebx, %ecx
movl %ebp, %edx
movl %r12d, %esi
movl $1, %edi
call _Z8calAsynciiiiiii
movl %r13d, (%rsp)
movl %r13d, %r9d
movl %ebx, %r8d
movl %ebx, %ecx
movl %ebp, %edx
movl %r12d, %esi
movl $0, %edi
call _Z8calAsynciiiiiii
addq $16, %rsp
.cfi_def_cfa_offset 64
call _Z14syncAllStreamsv
cmpl %r14d, %ebx
jne .L128
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L131:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
.cfi_restore 15
ret
.cfi_endproc
.LFE4051:
.size _Z13block_FW_2GPUi, .-_Z13block_FW_2GPUi
.globl main
.type main, @function
main:
.LFB4046:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rsi, %rbx
movq 24(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %ebp
movq 8(%rbx), %rdi
movl %eax, %esi
call _Z5inputPci
movl %ebp, %edi
call _Z13block_FW_2GPUi
movq 16(%rbx), %rdi
call _Z6outputPc
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4046:
.size main, .-main
.type _GLOBAL__sub_I_realn, @function
_GLOBAL__sub_I_realn:
.LFB4771:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq $0, streams(%rip)
movq $0, 8+streams(%rip)
movq $0, 16+streams(%rip)
movq $0, 24+streams(%rip)
movq $0, 32+streams(%rip)
movq $0, 40+streams(%rip)
leaq __dso_handle(%rip), %rdx
movl $0, %esi
leaq __tcf_0(%rip), %rdi
call __cxa_atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4771:
.size _GLOBAL__sub_I_realn, .-_GLOBAL__sub_I_realn
.section .init_array
.align 8
.quad _GLOBAL__sub_I_realn
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl streams
.bss
.align 32
.type streams, @object
.size streams, 48
streams:
.zero 48
.globl streamSize
.align 8
.type streamSize, @object
.size streamSize, 8
streamSize:
.zero 8
.globl dDist
.align 16
.type dDist, @object
.size dDist, 16
dDist:
.zero 16
.globl Dist
.align 8
.type Dist, @object
.size Dist, 8
Dist:
.zero 8
.globl m
.align 4
.type m, @object
.size m, 4
m:
.zero 4
.globl n
.align 4
.type n, @object
.size n, 4
n:
.zero 4
.globl realn
.align 4
.type realn, @object
.size realn, 4
realn:
.zero 4
.hidden __dso_handle
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.