system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include <stdlib.h>
#include <string>
#include <vector>
#include <sstream>
#include <cuda.h>
#include <iterator>
using namespace std;
__global__ void multiply(int *A, int *B, int *C, int N) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int i = idx / N, j = idx % N;
int sum = 0;
for (int k = 0; k < N; k++) {
sum += A[i * N + k] * B[k * N + j];
}
C[idx] = sum;
}
int main() {
string line;
int N = -1;
int *A, *B, *C, *cur;
int count = 0;
while(getline(cin, line)) {
if (N == -1) {
N = atoi(line.c_str());
A = new int[N * N];
B = new int[N * N];
C = new int[N * N];
cur = A;
} else {
vector<string> nums;
istringstream iss(line);
copy(istream_iterator<string>(iss),
istream_iterator<string>(),
back_inserter(nums));
for (int i = 0; i < nums.size(); i++) {
cur[count * N + i] = atoi(nums[i].c_str());
}
count++;
if (count == N) {
count = 0;
cur = B;
}
}
}
int *dA, *dB, *dC;
cudaMalloc(&dA, sizeof(int) * N * N);
cudaMalloc(&dB, sizeof(int) * N * N);
cudaMalloc(&dC, sizeof(int) * N * N);
cudaMemcpy(dA, A, sizeof(int) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(dB, B, sizeof(int) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(dC, C, sizeof(int) * N * N, cudaMemcpyHostToDevice);
dim3 blockDim(64, 1, 1);
dim3 gridDim(N * N / blockDim.x, 1, 1);
multiply<<<gridDim, blockDim>>>(dA, dB, dC, N);
cudaMemcpy(dA, A, sizeof(int) * N * N, cudaMemcpyDeviceToHost);
cudaMemcpy(dB, B, sizeof(int) * N * N, cudaMemcpyDeviceToHost);
cudaMemcpy(dC, C, sizeof(int) * N * N, cudaMemcpyDeviceToHost);
return 0;
} | .file "tmpxft_000c3fab_00000000-6_matrix.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4126:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4126:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z8multiplyPiS_S_iPiS_S_i
.type _Z32__device_stub__Z8multiplyPiS_S_iPiS_S_i, @function
_Z32__device_stub__Z8multiplyPiS_S_iPiS_S_i:
.LFB4148:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8multiplyPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4148:
.size _Z32__device_stub__Z8multiplyPiS_S_iPiS_S_i, .-_Z32__device_stub__Z8multiplyPiS_S_iPiS_S_i
.globl _Z8multiplyPiS_S_i
.type _Z8multiplyPiS_S_i, @function
_Z8multiplyPiS_S_i:
.LFB4149:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z8multiplyPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4149:
.size _Z8multiplyPiS_S_i, .-_Z8multiplyPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z8multiplyPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4151:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z8multiplyPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4151:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .text._ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag,"axG",@progbits,_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag,comdat
.align 2
.weak _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag
.type _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag, @function
_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag:
.LFB4704:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $16, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %rbx
movq %rsi, %r12
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
subq %rsi, %rdx
movq %rdx, %rbp
movq %rdx, (%rsp)
cmpq $15, %rdx
ja .L20
movq (%rdi), %rdi
cmpq $1, %rdx
jne .L16
movzbl (%rsi), %eax
movb %al, (%rdi)
.L17:
movq (%rsp), %rax
movq %rax, 8(%rbx)
movq (%rbx), %rdx
movb $0, (%rdx,%rax)
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L21
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
movq %rsp, %rsi
movl $0, %edx
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE9_M_createERmm@PLT
movq %rax, %rdi
movq %rax, (%rbx)
movq (%rsp), %rax
movq %rax, 16(%rbx)
.L15:
movq %rbp, %rdx
movq %r12, %rsi
call memcpy@PLT
jmp .L17
.L16:
testq %rdx, %rdx
je .L17
jmp .L15
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4704:
.size _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag, .-_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag
.section .rodata._ZNSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaIS5_EE17_M_realloc_insertIJRKS5_EEEvN9__gnu_cxx17__normal_iteratorIPS5_S7_EEDpOT_.str1.1,"aMS",@progbits,1
.LC1:
.string "vector::_M_realloc_insert"
.section .text._ZNSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaIS5_EE17_M_realloc_insertIJRKS5_EEEvN9__gnu_cxx17__normal_iteratorIPS5_S7_EEDpOT_,"axG",@progbits,_ZNSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaIS5_EE17_M_realloc_insertIJRKS5_EEEvN9__gnu_cxx17__normal_iteratorIPS5_S7_EEDpOT_,comdat
.align 2
.weak _ZNSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaIS5_EE17_M_realloc_insertIJRKS5_EEEvN9__gnu_cxx17__normal_iteratorIPS5_S7_EEDpOT_
.type _ZNSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaIS5_EE17_M_realloc_insertIJRKS5_EEEvN9__gnu_cxx17__normal_iteratorIPS5_S7_EEDpOT_, @function
_ZNSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaIS5_EE17_M_realloc_insertIJRKS5_EEEvN9__gnu_cxx17__normal_iteratorIPS5_S7_EEDpOT_:
.LFB4898:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA4898
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rdx, 8(%rsp)
movq 8(%rdi), %r13
movq (%rdi), %rbp
movq %r13, %rax
subq %rbp, %rax
sarq $5, %rax
movabsq $288230376151711743, %rdx
cmpq %rdx, %rax
je .L64
movq %rdi, %r12
movq %rsi, %rbx
cmpq %rbp, %r13
movl $1, %edx
cmovne %rax, %rdx
addq %rdx, %rax
jc .L25
movabsq $288230376151711743, %rdx
cmpq %rdx, %rax
cmovbe %rax, %rdx
movq %rdx, (%rsp)
movq %rsi, %r14
subq %rbp, %r14
movl $0, %r15d
testq %rax, %rax
je .L26
jmp .L52
.L64:
leaq .LC1(%rip), %rdi
.LEHB0:
call _ZSt20__throw_length_errorPKc@PLT
.LEHE0:
.L71:
cmpq %rbp, %rbx
je .L55
leaq 16(%rbp), %rax
leaq 16(%rbx), %r8
movq %r15, %rdx
jmp .L36
.L67:
movq -8(%rcx), %r11
leaq 1(%r11), %r9
cmpl $8, %r9d
jnb .L29
testb $4, %r9b
jne .L65
testl %r9d, %r9d
je .L35
movzbl (%rcx), %r10d
movb %r10b, (%rsi)
testb $2, %r9b
je .L35
movl %r9d, %r9d
movzwl -2(%rcx,%r9), %ecx
movw %cx, -2(%rsi,%r9)
jmp .L35
.L65:
movl (%rcx), %r10d
movl %r10d, (%rsi)
movl %r9d, %r9d
movl -4(%rcx,%r9), %ecx
movl %ecx, -4(%rsi,%r9)
jmp .L35
.L29:
movq (%rcx), %r10
movq %r10, (%rsi)
movl %r9d, %r10d
movq -8(%rcx,%r10), %r11
movq %r11, -8(%rsi,%r10)
leaq 8(%rsi), %r10
andq $-8, %r10
subq %r10, %rsi
subq %rsi, %rcx
addl %esi, %r9d
andl $-8, %r9d
cmpl $8, %r9d
jb .L35
andl $-8, %r9d
movl $0, %esi
.L33:
movl %esi, %r11d
movq (%rcx,%r11), %r14
movq %r14, (%r10,%r11)
addl $8, %esi
cmpl %r9d, %esi
jb .L33
.L35:
movq -8(%rdi), %rcx
movq %rcx, 8(%rdx)
addq $32, %rdx
addq $32, %rax
cmpq %rax, %r8
je .L66
.L36:
leaq 16(%rdx), %rsi
movq %rsi, (%rdx)
movq %rax, %rdi
movq -16(%rax), %rcx
cmpq %rcx, %rax
je .L67
movq %rcx, (%rdx)
movq (%rax), %rcx
movq %rcx, 16(%rdx)
jmp .L35
.L66:
movq %rbx, %r14
subq %rbp, %r14
addq %r15, %r14
.L27:
addq $32, %r14
cmpq %r13, %rbx
je .L37
leaq 16(%rbx), %rax
leaq 16(%r13), %rdi
movq %r14, %rdx
jmp .L46
.L55:
movq %r15, %r14
jmp .L27
.L70:
movq -8(%rax), %rsi
addq $1, %rsi
movq %rax, %r11
cmpl $8, %esi
jnb .L39
testb $4, %sil
jne .L68
testl %esi, %esi
je .L45
movzbl (%rax), %r9d
movb %r9b, (%r8)
testb $2, %sil
je .L45
movl %esi, %esi
movzwl -2(%rax,%rsi), %r9d
movw %r9w, -2(%r8,%rsi)
jmp .L45
.L68:
movl (%rax), %r9d
movl %r9d, (%r8)
movl %esi, %esi
movl -4(%rax,%rsi), %r9d
movl %r9d, -4(%r8,%rsi)
jmp .L45
.L39:
movq (%rax), %r9
movq %r9, (%r8)
movl %esi, %r9d
movq -8(%rax,%r9), %r10
movq %r10, -8(%r8,%r9)
leaq 8(%r8), %r10
andq $-8, %r10
subq %r10, %r8
subq %r8, %r11
addl %r8d, %esi
andl $-8, %esi
cmpl $8, %esi
jb .L45
andl $-8, %esi
movl $0, %r8d
movq %rax, 8(%rsp)
.L43:
movl %r8d, %r9d
movq (%r11,%r9), %rax
movq %rax, (%r10,%r9)
addl $8, %r8d
cmpl %esi, %r8d
jb .L43
movq 8(%rsp), %rax
.L45:
movq -8(%rcx), %rcx
movq %rcx, 8(%rdx)
addq $32, %rdx
addq $32, %rax
cmpq %rdi, %rax
je .L69
.L46:
leaq 16(%rdx), %r8
movq %r8, (%rdx)
movq %rax, %rcx
movq -16(%rax), %rsi
cmpq %rax, %rsi
je .L70
movq %rsi, (%rdx)
movq (%rax), %rsi
movq %rsi, 16(%rdx)
jmp .L45
.L69:
subq %rbx, %r13
addq %r13, %r14
.L37:
testq %rbp, %rbp
je .L47
movq 16(%r12), %rsi
subq %rbp, %rsi
movq %rbp, %rdi
call _ZdlPvm@PLT
.L47:
movq %r15, (%r12)
movq %r14, 8(%r12)
movq (%rsp), %rax
salq $5, %rax
addq %r15, %rax
movq %rax, 16(%r12)
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L56:
.cfi_restore_state
endbr64
movq %rax, %rdi
call __cxa_begin_catch@PLT
testq %r15, %r15
jne .L49
movq %r14, %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
.L50:
.LEHB1:
call __cxa_rethrow@PLT
.LEHE1:
.L57:
endbr64
movq %rax, %rbx
call __cxa_end_catch@PLT
movq %rbx, %rdi
.LEHB2:
call _Unwind_Resume@PLT
.L49:
movq (%rsp), %rsi
salq $5, %rsi
movq %r15, %rdi
call _ZdlPvm@PLT
jmp .L50
.L25:
movq %rsi, %r14
subq %rbp, %r14
movabsq $288230376151711743, %rax
movq %rax, (%rsp)
.L52:
movq (%rsp), %rdi
salq $5, %rdi
call _Znwm@PLT
.LEHE2:
movq %rax, %r15
.L26:
addq %r15, %r14
leaq 16(%r14), %rax
movq %rax, (%r14)
movq 8(%rsp), %rax
movq (%rax), %rsi
movq %rsi, %rdx
addq 8(%rax), %rdx
movq %r14, %rdi
.LEHB3:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag
.LEHE3:
jmp .L71
.cfi_endproc
.LFE4898:
.globl __gxx_personality_v0
.section .gcc_except_table._ZNSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaIS5_EE17_M_realloc_insertIJRKS5_EEEvN9__gnu_cxx17__normal_iteratorIPS5_S7_EEDpOT_,"aG",@progbits,_ZNSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaIS5_EE17_M_realloc_insertIJRKS5_EEEvN9__gnu_cxx17__normal_iteratorIPS5_S7_EEDpOT_,comdat
.align 4
.LLSDA4898:
.byte 0xff
.byte 0x9b
.uleb128 .LLSDATT4898-.LLSDATTD4898
.LLSDATTD4898:
.byte 0x1
.uleb128 .LLSDACSE4898-.LLSDACSB4898
.LLSDACSB4898:
.uleb128 .LEHB0-.LFB4898
.uleb128 .LEHE0-.LEHB0
.uleb128 0
.uleb128 0
.uleb128 .LEHB1-.LFB4898
.uleb128 .LEHE1-.LEHB1
.uleb128 .L57-.LFB4898
.uleb128 0
.uleb128 .LEHB2-.LFB4898
.uleb128 .LEHE2-.LEHB2
.uleb128 0
.uleb128 0
.uleb128 .LEHB3-.LFB4898
.uleb128 .LEHE3-.LEHB3
.uleb128 .L56-.LFB4898
.uleb128 0x1
.LLSDACSE4898:
.byte 0x1
.byte 0
.align 4
.long 0
.LLSDATT4898:
.section .text._ZNSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaIS5_EE17_M_realloc_insertIJRKS5_EEEvN9__gnu_cxx17__normal_iteratorIPS5_S7_EEDpOT_,"axG",@progbits,_ZNSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaIS5_EE17_M_realloc_insertIJRKS5_EEEvN9__gnu_cxx17__normal_iteratorIPS5_S7_EEDpOT_,comdat
.size _ZNSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaIS5_EE17_M_realloc_insertIJRKS5_EEEvN9__gnu_cxx17__normal_iteratorIPS5_S7_EEDpOT_, .-_ZNSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaIS5_EE17_M_realloc_insertIJRKS5_EEEvN9__gnu_cxx17__normal_iteratorIPS5_S7_EEDpOT_
.section .text._ZSt13__copy_move_aILb0ESt16istream_iteratorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEcS4_lESt20back_insert_iteratorISt6vectorIS6_SaIS6_EEEET1_T0_SE_SD_,"axG",@progbits,_ZSt13__copy_move_aILb0ESt16istream_iteratorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEcS4_lESt20back_insert_iteratorISt6vectorIS6_SaIS6_EEEET1_T0_SE_SD_,comdat
.weak _ZSt13__copy_move_aILb0ESt16istream_iteratorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEcS4_lESt20back_insert_iteratorISt6vectorIS6_SaIS6_EEEET1_T0_SE_SD_
.type _ZSt13__copy_move_aILb0ESt16istream_iteratorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEcS4_lESt20back_insert_iteratorISt6vectorIS6_SaIS6_EEEET1_T0_SE_SD_, @function
_ZSt13__copy_move_aILb0ESt16istream_iteratorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEcS4_lESt20back_insert_iteratorISt6vectorIS6_SaIS6_EEEET1_T0_SE_SD_:
.LFB4674:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA4674
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $400, %rsp
.cfi_def_cfa_offset 432
movq %rdi, %rbp
movq %rsi, %r12
movq %rdx, %rbx
movq %fs:40, %rax
movq %rax, 392(%rsp)
xorl %eax, %eax
movq (%rsi), %rax
movq %rax, 96(%rsp)
leaq 120(%rsp), %rax
movq %rax, 104(%rsp)
movq 8(%rsi), %rsi
movq %rsi, %rdx
addq 16(%r12), %rdx
leaq 104(%rsp), %rdi
.LEHB4:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag
.LEHE4:
movzbl 40(%r12), %eax
movb %al, 136(%rsp)
movq 96(%rsp), %rax
movq %rax, 144(%rsp)
leaq 168(%rsp), %rax
movq %rax, 152(%rsp)
movq 104(%rsp), %rsi
movq %rsi, %rdx
addq 112(%rsp), %rdx
leaq 152(%rsp), %rdi
.LEHB5:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag
.LEHE5:
movzbl 136(%rsp), %eax
movb %al, 184(%rsp)
movq 0(%rbp), %rax
movq %rax, (%rsp)
leaq 24(%rsp), %rax
movq %rax, 8(%rsp)
movq 8(%rbp), %rsi
movq %rsi, %rdx
addq 16(%rbp), %rdx
leaq 8(%rsp), %rdi
.LEHB6:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag
.LEHE6:
movzbl 40(%rbp), %eax
movb %al, 40(%rsp)
movq (%rsp), %rax
movq %rax, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 56(%rsp)
movq 8(%rsp), %rsi
movq %rsi, %rdx
addq 16(%rsp), %rdx
leaq 56(%rsp), %rdi
.LEHB7:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag
.LEHE7:
movzbl 40(%rsp), %eax
movb %al, 88(%rsp)
movq 144(%rsp), %rax
movq %rax, 240(%rsp)
leaq 264(%rsp), %rax
movq %rax, 248(%rsp)
movq 152(%rsp), %rsi
movq %rsi, %rdx
addq 160(%rsp), %rdx
leaq 248(%rsp), %rdi
.LEHB8:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag
.LEHE8:
movzbl 184(%rsp), %eax
movb %al, 280(%rsp)
movq 48(%rsp), %rax
movq %rax, 192(%rsp)
leaq 216(%rsp), %rax
movq %rax, 200(%rsp)
movq 56(%rsp), %rsi
movq %rsi, %rdx
addq 64(%rsp), %rdx
leaq 200(%rsp), %rdi
.LEHB9:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag
.LEHE9:
movzbl 88(%rsp), %eax
movb %al, 232(%rsp)
movq 240(%rsp), %rax
movq %rax, 336(%rsp)
leaq 360(%rsp), %rax
movq %rax, 344(%rsp)
movq 248(%rsp), %rsi
movq %rsi, %rdx
addq 256(%rsp), %rdx
leaq 344(%rsp), %rdi
.LEHB10:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag
.LEHE10:
movzbl 280(%rsp), %eax
movb %al, 376(%rsp)
movq 192(%rsp), %rax
movq %rax, 288(%rsp)
leaq 312(%rsp), %rax
movq %rax, 296(%rsp)
movq 200(%rsp), %rsi
movq %rsi, %rdx
addq 208(%rsp), %rdx
leaq 296(%rsp), %rdi
.LEHB11:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag
.LEHE11:
movzbl 232(%rsp), %eax
movb %al, 328(%rsp)
leaq 296(%rsp), %rbp
.L77:
movzbl 328(%rsp), %eax
cmpb 376(%rsp), %al
je .L114
.L73:
movq 8(%rbx), %rdi
cmpq 16(%rbx), %rdi
je .L75
leaq 16(%rdi), %rax
movq %rax, (%rdi)
movq 296(%rsp), %rsi
movq %rsi, %rdx
addq 304(%rsp), %rdx
.LEHB12:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag
jmp .L115
.L114:
testb %al, %al
je .L74
movq 336(%rsp), %rax
cmpq %rax, 288(%rsp)
jne .L73
.L74:
movq 296(%rsp), %rdi
leaq 312(%rsp), %rax
cmpq %rax, %rdi
je .L80
movq 312(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L80:
movq 344(%rsp), %rdi
leaq 360(%rsp), %rax
cmpq %rax, %rdi
je .L81
movq 360(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L81:
movq 200(%rsp), %rdi
leaq 216(%rsp), %rax
cmpq %rax, %rdi
je .L82
movq 216(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L82:
movq 248(%rsp), %rdi
leaq 264(%rsp), %rax
cmpq %rax, %rdi
je .L87
movq 264(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L87:
movq 56(%rsp), %rdi
leaq 72(%rsp), %rax
cmpq %rax, %rdi
je .L88
movq 72(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L88:
movq 8(%rsp), %rdi
leaq 24(%rsp), %rax
cmpq %rax, %rdi
je .L92
movq 24(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L92:
movq 152(%rsp), %rdi
leaq 168(%rsp), %rax
cmpq %rax, %rdi
je .L93
movq 168(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L93:
movq 104(%rsp), %rdi
leaq 120(%rsp), %rax
cmpq %rax, %rdi
je .L94
movq 120(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L94:
movq 392(%rsp), %rax
subq %fs:40, %rax
jne .L116
movq %rbx, %rax
addq $400, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L115:
.cfi_restore_state
addq $32, 8(%rbx)
.L76:
movq 288(%rsp), %rdi
testq %rdi, %rdi
je .L77
movq %rbp, %rsi
call _ZStrsIcSt11char_traitsIcESaIcEERSt13basic_istreamIT_T0_ES7_RNSt7__cxx1112basic_stringIS4_S5_T1_EE@PLT
jmp .L117
.L75:
movq %rbp, %rdx
movq %rdi, %rsi
movq %rbx, %rdi
call _ZNSt6vectorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESaIS5_EE17_M_realloc_insertIJRKS5_EEEvN9__gnu_cxx17__normal_iteratorIPS5_S7_EEDpOT_
.LEHE12:
jmp .L76
.L117:
movq (%rax), %rdx
movq -24(%rdx), %rdx
testb $5, 32(%rax,%rdx)
je .L77
movq $0, 288(%rsp)
movb $0, 328(%rsp)
jmp .L77
.L107:
endbr64
movq %rax, %rbx
leaq 296(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
.L85:
leaq 344(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
.L86:
leaq 200(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
.L90:
leaq 248(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
.L91:
leaq 56(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
.L95:
leaq 8(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
.L96:
leaq 152(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
.L97:
leaq 104(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
movq 392(%rsp), %rax
subq %fs:40, %rax
je .L98
call __stack_chk_fail@PLT
.L106:
endbr64
movq %rax, %rbx
jmp .L85
.L105:
endbr64
movq %rax, %rbx
jmp .L86
.L104:
endbr64
movq %rax, %rbx
jmp .L90
.L103:
endbr64
movq %rax, %rbx
jmp .L91
.L102:
endbr64
movq %rax, %rbx
jmp .L95
.L101:
endbr64
movq %rax, %rbx
jmp .L96
.L100:
endbr64
movq %rax, %rbx
jmp .L97
.L98:
movq %rbx, %rdi
.LEHB13:
call _Unwind_Resume@PLT
.LEHE13:
.L116:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4674:
.section .gcc_except_table._ZSt13__copy_move_aILb0ESt16istream_iteratorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEcS4_lESt20back_insert_iteratorISt6vectorIS6_SaIS6_EEEET1_T0_SE_SD_,"aG",@progbits,_ZSt13__copy_move_aILb0ESt16istream_iteratorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEcS4_lESt20back_insert_iteratorISt6vectorIS6_SaIS6_EEEET1_T0_SE_SD_,comdat
.LLSDA4674:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE4674-.LLSDACSB4674
.LLSDACSB4674:
.uleb128 .LEHB4-.LFB4674
.uleb128 .LEHE4-.LEHB4
.uleb128 0
.uleb128 0
.uleb128 .LEHB5-.LFB4674
.uleb128 .LEHE5-.LEHB5
.uleb128 .L100-.LFB4674
.uleb128 0
.uleb128 .LEHB6-.LFB4674
.uleb128 .LEHE6-.LEHB6
.uleb128 .L101-.LFB4674
.uleb128 0
.uleb128 .LEHB7-.LFB4674
.uleb128 .LEHE7-.LEHB7
.uleb128 .L102-.LFB4674
.uleb128 0
.uleb128 .LEHB8-.LFB4674
.uleb128 .LEHE8-.LEHB8
.uleb128 .L103-.LFB4674
.uleb128 0
.uleb128 .LEHB9-.LFB4674
.uleb128 .LEHE9-.LEHB9
.uleb128 .L104-.LFB4674
.uleb128 0
.uleb128 .LEHB10-.LFB4674
.uleb128 .LEHE10-.LEHB10
.uleb128 .L105-.LFB4674
.uleb128 0
.uleb128 .LEHB11-.LFB4674
.uleb128 .LEHE11-.LEHB11
.uleb128 .L106-.LFB4674
.uleb128 0
.uleb128 .LEHB12-.LFB4674
.uleb128 .LEHE12-.LEHB12
.uleb128 .L107-.LFB4674
.uleb128 0
.uleb128 .LEHB13-.LFB4674
.uleb128 .LEHE13-.LEHB13
.uleb128 0
.uleb128 0
.LLSDACSE4674:
.section .text._ZSt13__copy_move_aILb0ESt16istream_iteratorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEcS4_lESt20back_insert_iteratorISt6vectorIS6_SaIS6_EEEET1_T0_SE_SD_,"axG",@progbits,_ZSt13__copy_move_aILb0ESt16istream_iteratorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEcS4_lESt20back_insert_iteratorISt6vectorIS6_SaIS6_EEEET1_T0_SE_SD_,comdat
.size _ZSt13__copy_move_aILb0ESt16istream_iteratorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEcS4_lESt20back_insert_iteratorISt6vectorIS6_SaIS6_EEEET1_T0_SE_SD_, .-_ZSt13__copy_move_aILb0ESt16istream_iteratorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEcS4_lESt20back_insert_iteratorISt6vectorIS6_SaIS6_EEEET1_T0_SE_SD_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "basic_string: construction from null is not valid"
.text
.globl main
.type main, @function
main:
.LFB4108:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA4108
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $856, %rsp
.cfi_def_cfa_offset 912
movq %fs:40, %rax
movq %rax, 840(%rsp)
xorl %eax, %eax
leaq 144(%rsp), %rax
movq %rax, 128(%rsp)
movq $0, 136(%rsp)
movb $0, 144(%rsp)
movq _ZSt3cin(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt3cin(%rip), %rdx
movq 240(%rdx,%rax), %rbx
testq %rbx, %rbx
je .L119
movl $0, %r15d
movl $-1, %r13d
movq %rdx, %r12
jmp .L120
.L207:
movq %rax, %r14
movq %rax, 40(%rsp)
movq %rbx, %rdi
.LEHB14:
call _Znam@PLT
movq %rax, 24(%rsp)
movq %rbx, %rdi
call _Znam@PLT
movq %rax, 32(%rsp)
movq %r14, 16(%rsp)
.L122:
movq (%r12), %rax
movq -24(%rax), %rax
movq 240(%r12,%rax), %rbx
testq %rbx, %rbx
je .L119
.L120:
cmpb $0, 56(%rbx)
je .L163
movzbl 67(%rbx), %edx
.L164:
movsbl %dl, %edx
leaq 128(%rsp), %rsi
movq %r12, %rdi
call _ZSt7getlineIcSt11char_traitsIcESaIcEERSt13basic_istreamIT_T0_ES7_RNSt7__cxx1112basic_stringIS4_S5_T1_EES4_@PLT
.LEHE14:
jmp .L196
.L121:
movq $0, 96(%rsp)
movq $0, 104(%rsp)
movq $0, 112(%rsp)
leaq 448(%rsp), %rbx
leaq 568(%rsp), %rdi
call _ZNSt8ios_baseC2Ev@PLT
leaq 16+_ZTVSt9basic_iosIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 568(%rsp)
movq $0, 784(%rsp)
movb $0, 792(%rsp)
movb $0, 793(%rsp)
movq $0, 800(%rsp)
movq $0, 808(%rsp)
movq $0, 816(%rsp)
movq $0, 824(%rsp)
movq 8+_ZTTNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEE(%rip), %r14
movq %r14, 448(%rsp)
movq -24(%r14), %rax
movq 16+_ZTTNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEE(%rip), %rcx
movq %rcx, 448(%rsp,%rax)
movq $0, 456(%rsp)
movq 448(%rsp), %rax
addq -24(%rax), %rbx
movq %rbx, %rdi
movl $0, %esi
.LEHB15:
call _ZNSt9basic_iosIcSt11char_traitsIcEE4initEPSt15basic_streambufIcS1_E@PLT
.LEHE15:
leaq 24+_ZTVNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEE(%rip), %rax
movq %rax, 448(%rsp)
leaq 40(%rax), %rax
movq %rax, 568(%rsp)
leaq 16+_ZTVSt15basic_streambufIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 464(%rsp)
movq $0, 472(%rsp)
movq $0, 480(%rsp)
movq $0, 488(%rsp)
movq $0, 496(%rsp)
movq $0, 504(%rsp)
movq $0, 512(%rsp)
leaq 520(%rsp), %rdi
call _ZNSt6localeC1Ev@PLT
leaq 16+_ZTVNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEEE(%rip), %rax
movq %rax, 464(%rsp)
movl $0, 528(%rsp)
movq 136(%rsp), %rbx
movq 128(%rsp), %rbp
leaq 552(%rsp), %rax
movq %rax, 536(%rsp)
testq %rbp, %rbp
jne .L123
testq %rbx, %rbx
jne .L197
.L123:
movq %rbx, 80(%rsp)
cmpq $15, %rbx
ja .L198
cmpq $1, %rbx
jne .L127
movzbl 0(%rbp), %eax
movb %al, 552(%rsp)
.L128:
movq 80(%rsp), %rax
movq %rax, 544(%rsp)
movq 536(%rsp), %rdx
movb $0, (%rdx,%rax)
movl $8, 528(%rsp)
leaq 464(%rsp), %rdi
movl $0, %ecx
movl $0, %edx
movq 536(%rsp), %rsi
.LEHB16:
call _ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE7_M_syncEPcmm@PLT
.LEHE16:
jmp .L199
.L197:
movq 840(%rsp), %rax
subq %fs:40, %rax
jne .L200
leaq .LC2(%rip), %rdi
.LEHB17:
call _ZSt19__throw_logic_errorPKc@PLT
.L180:
endbr64
movq %rax, %rbx
jmp .L131
.L200:
call __stack_chk_fail@PLT
.L198:
leaq 80(%rsp), %rsi
leaq 536(%rsp), %rdi
movl $0, %edx
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE9_M_createERmm@PLT
.LEHE17:
movq %rax, %rdi
movq %rax, 536(%rsp)
movq 80(%rsp), %rax
movq %rax, 552(%rsp)
.L126:
movq %rbx, %rdx
movq %rbp, %rsi
call memcpy@PLT
jmp .L128
.L127:
testq %rbx, %rbx
je .L128
leaq 552(%rsp), %rdi
jmp .L126
.L181:
endbr64
movq %rax, %rbx
leaq 536(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
.L131:
leaq 16+_ZTVSt15basic_streambufIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 464(%rsp)
leaq 520(%rsp), %rdi
call _ZNSt6localeD1Ev@PLT
.L132:
movq %r14, 448(%rsp)
movq -24(%r14), %rax
movq 16+_ZTTNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEE(%rip), %rcx
movq %rcx, 448(%rsp,%rax)
movq $0, 456(%rsp)
.L136:
leaq 16+_ZTVSt9basic_iosIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 568(%rsp)
leaq 568(%rsp), %rdi
call _ZNSt8ios_baseD2Ev@PLT
.L137:
movq 104(%rsp), %r12
movq 96(%rsp), %rbp
.L167:
cmpq %rbp, %r12
jne .L169
movq 96(%rsp), %rdi
movq 112(%rsp), %rsi
subq %rdi, %rsi
testq %rdi, %rdi
je .L171
call _ZdlPvm@PLT
.L171:
leaq 128(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
movq 840(%rsp), %rax
subq %fs:40, %rax
je .L172
call __stack_chk_fail@PLT
.L199:
leaq 464(%rsp), %rsi
leaq 568(%rsp), %rdi
.LEHB18:
call _ZNSt9basic_iosIcSt11char_traitsIcEE4initEPSt15basic_streambufIcS1_E@PLT
.LEHE18:
jmp .L201
.L179:
endbr64
movq %rax, %rbx
leaq 16+_ZTVNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEEE(%rip), %rax
movq %rax, 464(%rsp)
movq 536(%rsp), %rdi
leaq 552(%rsp), %rax
cmpq %rax, %rdi
je .L135
movq 552(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L135:
leaq 16+_ZTVSt15basic_streambufIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 464(%rsp)
leaq 520(%rsp), %rdi
call _ZNSt6localeD1Ev@PLT
jmp .L132
.L178:
endbr64
movq %rax, %rbx
jmp .L136
.L201:
movq $0, 208(%rsp)
leaq 232(%rsp), %rax
movq %rax, 216(%rsp)
movq $0, 224(%rsp)
movb $0, 232(%rsp)
movb $0, 248(%rsp)
leaq 448(%rsp), %rdi
movq %rdi, 160(%rsp)
leaq 184(%rsp), %rax
movq %rax, 168(%rsp)
movq $0, 176(%rsp)
movb $0, 184(%rsp)
movb $1, 200(%rsp)
leaq 168(%rsp), %rsi
.LEHB19:
call _ZStrsIcSt11char_traitsIcESaIcEERSt13basic_istreamIT_T0_ES7_RNSt7__cxx1112basic_stringIS4_S5_T1_EE@PLT
.LEHE19:
movq (%rax), %rdx
movq -24(%rdx), %rdx
testb $5, 32(%rax,%rdx)
je .L138
movq $0, 160(%rsp)
movb $0, 200(%rsp)
.L138:
leaq 96(%rsp), %rbx
movq 208(%rsp), %rax
movq %rax, 352(%rsp)
leaq 376(%rsp), %rax
movq %rax, 360(%rsp)
movq 216(%rsp), %rsi
movq %rsi, %rdx
addq 224(%rsp), %rdx
leaq 360(%rsp), %rdi
.LEHB20:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag
.LEHE20:
jmp .L202
.L182:
endbr64
movq %rax, %rbx
leaq 168(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
.L141:
leaq 216(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
leaq 448(%rsp), %rdi
call _ZNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEED1Ev@PLT
jmp .L137
.L202:
movzbl 248(%rsp), %eax
movb %al, 392(%rsp)
movq 352(%rsp), %rax
movq %rax, 400(%rsp)
leaq 424(%rsp), %rax
movq %rax, 408(%rsp)
movq 360(%rsp), %rsi
movq %rsi, %rdx
addq 368(%rsp), %rdx
leaq 408(%rsp), %rdi
.LEHB21:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag
.LEHE21:
movzbl 392(%rsp), %eax
movb %al, 440(%rsp)
movq 160(%rsp), %rax
movq %rax, 256(%rsp)
leaq 280(%rsp), %rax
movq %rax, 264(%rsp)
movq 168(%rsp), %rsi
movq %rsi, %rdx
addq 176(%rsp), %rdx
leaq 264(%rsp), %rdi
.LEHB22:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag
.LEHE22:
movzbl 200(%rsp), %eax
movb %al, 296(%rsp)
movq 256(%rsp), %rax
movq %rax, 304(%rsp)
leaq 328(%rsp), %rax
movq %rax, 312(%rsp)
movq 264(%rsp), %rsi
movq %rsi, %rdx
addq 272(%rsp), %rdx
leaq 312(%rsp), %rdi
.LEHB23:
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag
.LEHE23:
movzbl 296(%rsp), %eax
movb %al, 344(%rsp)
leaq 400(%rsp), %rsi
leaq 304(%rsp), %rdi
movq %rbx, %rdx
.LEHB24:
call _ZSt13__copy_move_aILb0ESt16istream_iteratorINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEcS4_lESt20back_insert_iteratorISt6vectorIS6_SaIS6_EEEET1_T0_SE_SD_
.LEHE24:
movq 312(%rsp), %rdi
leaq 328(%rsp), %rax
cmpq %rax, %rdi
je .L142
movq 328(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L142:
movq 264(%rsp), %rdi
leaq 280(%rsp), %rax
cmpq %rax, %rdi
je .L143
movq 280(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L143:
movq 408(%rsp), %rdi
leaq 424(%rsp), %rax
cmpq %rax, %rdi
je .L144
movq 424(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L144:
movq 360(%rsp), %rdi
leaq 376(%rsp), %rax
cmpq %rax, %rdi
je .L145
movq 376(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L145:
movq 168(%rsp), %rdi
leaq 184(%rsp), %rax
cmpq %rax, %rdi
je .L146
movq 184(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L146:
movq 216(%rsp), %rdi
leaq 232(%rsp), %rax
cmpq %rax, %rdi
je .L153
movq 232(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L153:
movq 96(%rsp), %rdx
cmpq %rdx, 104(%rsp)
je .L154
movl %r13d, %eax
imull %r15d, %eax
cltq
movl $0, %ebx
movq 16(%rsp), %rcx
leaq (%rcx,%rax,4), %rbp
.L155:
movq %rbx, %rax
salq $5, %rax
movq (%rdx,%rax), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, 0(%rbp,%rbx,4)
movq 96(%rsp), %rdx
addq $1, %rbx
movq 104(%rsp), %rax
subq %rdx, %rax
sarq $5, %rax
cmpq %rax, %rbx
jb .L155
.L154:
addl $1, %r15d
cmpl %r13d, %r15d
je .L203
.L156:
leaq 24+_ZTVNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEE(%rip), %rax
movq %rax, 448(%rsp)
leaq 40(%rax), %rax
movq %rax, 568(%rsp)
leaq 16+_ZTVNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEEE(%rip), %rax
movq %rax, 464(%rsp)
movq 536(%rsp), %rdi
leaq 552(%rsp), %rax
cmpq %rax, %rdi
je .L157
movq 552(%rsp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L157:
leaq 16+_ZTVSt15basic_streambufIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 464(%rsp)
leaq 520(%rsp), %rdi
call _ZNSt6localeD1Ev@PLT
movq %r14, 448(%rsp)
movq -24(%r14), %rax
movq 16+_ZTTNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEE(%rip), %rcx
movq %rcx, 448(%rsp,%rax)
movq $0, 456(%rsp)
leaq 16+_ZTVSt9basic_iosIcSt11char_traitsIcEE(%rip), %rax
movq %rax, 568(%rsp)
leaq 568(%rsp), %rdi
call _ZNSt8ios_baseD2Ev@PLT
movq 104(%rsp), %rbp
movq 96(%rsp), %rbx
cmpq %rbx, %rbp
jne .L160
.L158:
movq 96(%rsp), %rdi
testq %rdi, %rdi
je .L122
movq 112(%rsp), %rsi
subq %rdi, %rsi
call _ZdlPvm@PLT
jmp .L122
.L186:
endbr64
movq %rax, %rbx
leaq 312(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
.L149:
leaq 264(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
.L150:
leaq 408(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
.L151:
leaq 360(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
.L152:
leaq 168(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
jmp .L141
.L185:
endbr64
movq %rax, %rbx
jmp .L149
.L184:
endbr64
movq %rax, %rbx
jmp .L150
.L183:
endbr64
movq %rax, %rbx
jmp .L151
.L203:
movl 12(%rsp), %r15d
movq 24(%rsp), %rax
movq %rax, 16(%rsp)
jmp .L156
.L204:
movq 16(%rbx), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L159:
addq $32, %rbx
cmpq %rbx, %rbp
je .L158
.L160:
movq (%rbx), %rdi
leaq 16(%rbx), %rax
cmpq %rax, %rdi
jne .L204
jmp .L159
.L119:
movq 840(%rsp), %rax
subq %fs:40, %rax
jne .L205
.LEHB25:
call _ZSt16__throw_bad_castv@PLT
.L176:
endbr64
movq %rax, %rbx
jmp .L171
.L205:
call __stack_chk_fail@PLT
.L163:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %edx
jmp .L164
.L196:
movq (%rax), %rdx
movq -24(%rdx), %rdx
movl 32(%rax,%rdx), %eax
andl $5, %eax
movl %eax, 12(%rsp)
jne .L206
cmpl $-1, %r13d
jne .L121
movl $10, %edx
movl $0, %esi
movq 128(%rsp), %rdi
call __isoc23_strtol@PLT
movl %eax, %r13d
imull %eax, %eax
movslq %eax, %rbx
salq $2, %rbx
movq %rbx, %rdi
call _Znam@PLT
jmp .L207
.L206:
movslq %r13d, %rbx
imulq %rbx, %rbx
salq $2, %rbx
leaq 56(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq 40(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq 32(%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl %r13d, %eax
imull %r13d, %eax
shrl $6, %eax
movl %eax, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl $64, 80(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 80(%rsp), %rdx
movl $1, %ecx
movq 96(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L166
movl %r13d, %ecx
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
movq 56(%rsp), %rdi
call _Z32__device_stub__Z8multiplyPiS_S_iPiS_S_i
.L166:
movl $2, %ecx
movq %rbx, %rdx
movq 40(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 32(%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
.LEHE25:
leaq 128(%rsp), %rdi
call _ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@PLT
movq 840(%rsp), %rax
subq %fs:40, %rax
jne .L208
movl $0, %eax
addq $856, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L177:
.cfi_restore_state
endbr64
movq %rax, %rbx
jmp .L152
.L169:
movq 0(%rbp), %rdi
leaq 16(%rbp), %rax
cmpq %rax, %rdi
je .L168
movq 16(%rbp), %rax
leaq 1(%rax), %rsi
call _ZdlPvm@PLT
.L168:
addq $32, %rbp
jmp .L167
.L172:
movq %rbx, %rdi
.LEHB26:
call _Unwind_Resume@PLT
.LEHE26:
.L208:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4108:
.section .gcc_except_table,"a",@progbits
.LLSDA4108:
.byte 0xff
.byte 0xff
.byte 0x1
.uleb128 .LLSDACSE4108-.LLSDACSB4108
.LLSDACSB4108:
.uleb128 .LEHB14-.LFB4108
.uleb128 .LEHE14-.LEHB14
.uleb128 .L176-.LFB4108
.uleb128 0
.uleb128 .LEHB15-.LFB4108
.uleb128 .LEHE15-.LEHB15
.uleb128 .L178-.LFB4108
.uleb128 0
.uleb128 .LEHB16-.LFB4108
.uleb128 .LEHE16-.LEHB16
.uleb128 .L181-.LFB4108
.uleb128 0
.uleb128 .LEHB17-.LFB4108
.uleb128 .LEHE17-.LEHB17
.uleb128 .L180-.LFB4108
.uleb128 0
.uleb128 .LEHB18-.LFB4108
.uleb128 .LEHE18-.LEHB18
.uleb128 .L179-.LFB4108
.uleb128 0
.uleb128 .LEHB19-.LFB4108
.uleb128 .LEHE19-.LEHB19
.uleb128 .L182-.LFB4108
.uleb128 0
.uleb128 .LEHB20-.LFB4108
.uleb128 .LEHE20-.LEHB20
.uleb128 .L177-.LFB4108
.uleb128 0
.uleb128 .LEHB21-.LFB4108
.uleb128 .LEHE21-.LEHB21
.uleb128 .L183-.LFB4108
.uleb128 0
.uleb128 .LEHB22-.LFB4108
.uleb128 .LEHE22-.LEHB22
.uleb128 .L184-.LFB4108
.uleb128 0
.uleb128 .LEHB23-.LFB4108
.uleb128 .LEHE23-.LEHB23
.uleb128 .L185-.LFB4108
.uleb128 0
.uleb128 .LEHB24-.LFB4108
.uleb128 .LEHE24-.LEHB24
.uleb128 .L186-.LFB4108
.uleb128 0
.uleb128 .LEHB25-.LFB4108
.uleb128 .LEHE25-.LEHB25
.uleb128 .L176-.LFB4108
.uleb128 0
.uleb128 .LEHB26-.LFB4108
.uleb128 .LEHE26-.LEHB26
.uleb128 0
.uleb128 0
.LLSDACSE4108:
.text
.size main, .-main
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <stdlib.h>
#include <string>
#include <vector>
#include <sstream>
#include <cuda.h>
#include <iterator>
using namespace std;
__global__ void multiply(int *A, int *B, int *C, int N) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int i = idx / N, j = idx % N;
int sum = 0;
for (int k = 0; k < N; k++) {
sum += A[i * N + k] * B[k * N + j];
}
C[idx] = sum;
}
int main() {
string line;
int N = -1;
int *A, *B, *C, *cur;
int count = 0;
while(getline(cin, line)) {
if (N == -1) {
N = atoi(line.c_str());
A = new int[N * N];
B = new int[N * N];
C = new int[N * N];
cur = A;
} else {
vector<string> nums;
istringstream iss(line);
copy(istream_iterator<string>(iss),
istream_iterator<string>(),
back_inserter(nums));
for (int i = 0; i < nums.size(); i++) {
cur[count * N + i] = atoi(nums[i].c_str());
}
count++;
if (count == N) {
count = 0;
cur = B;
}
}
}
int *dA, *dB, *dC;
cudaMalloc(&dA, sizeof(int) * N * N);
cudaMalloc(&dB, sizeof(int) * N * N);
cudaMalloc(&dC, sizeof(int) * N * N);
cudaMemcpy(dA, A, sizeof(int) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(dB, B, sizeof(int) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(dC, C, sizeof(int) * N * N, cudaMemcpyHostToDevice);
dim3 blockDim(64, 1, 1);
dim3 gridDim(N * N / blockDim.x, 1, 1);
multiply<<<gridDim, blockDim>>>(dA, dB, dC, N);
cudaMemcpy(dA, A, sizeof(int) * N * N, cudaMemcpyDeviceToHost);
cudaMemcpy(dB, B, sizeof(int) * N * N, cudaMemcpyDeviceToHost);
cudaMemcpy(dC, C, sizeof(int) * N * N, cudaMemcpyDeviceToHost);
return 0;
} | #include <iostream>
#include <stdlib.h>
#include <string>
#include <vector>
#include <sstream>
#include <hip/hip_runtime.h>
#include <iterator>
using namespace std;
__global__ void multiply(int *A, int *B, int *C, int N) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int i = idx / N, j = idx % N;
int sum = 0;
for (int k = 0; k < N; k++) {
sum += A[i * N + k] * B[k * N + j];
}
C[idx] = sum;
}
int main() {
string line;
int N = -1;
int *A, *B, *C, *cur;
int count = 0;
while(getline(cin, line)) {
if (N == -1) {
N = atoi(line.c_str());
A = new int[N * N];
B = new int[N * N];
C = new int[N * N];
cur = A;
} else {
vector<string> nums;
istringstream iss(line);
copy(istream_iterator<string>(iss),
istream_iterator<string>(),
back_inserter(nums));
for (int i = 0; i < nums.size(); i++) {
cur[count * N + i] = atoi(nums[i].c_str());
}
count++;
if (count == N) {
count = 0;
cur = B;
}
}
}
int *dA, *dB, *dC;
hipMalloc(&dA, sizeof(int) * N * N);
hipMalloc(&dB, sizeof(int) * N * N);
hipMalloc(&dC, sizeof(int) * N * N);
hipMemcpy(dA, A, sizeof(int) * N * N, hipMemcpyHostToDevice);
hipMemcpy(dB, B, sizeof(int) * N * N, hipMemcpyHostToDevice);
hipMemcpy(dC, C, sizeof(int) * N * N, hipMemcpyHostToDevice);
dim3 blockDim(64, 1, 1);
dim3 gridDim(N * N / blockDim.x, 1, 1);
multiply<<<gridDim, blockDim>>>(dA, dB, dC, N);
hipMemcpy(dA, A, sizeof(int) * N * N, hipMemcpyDeviceToHost);
hipMemcpy(dB, B, sizeof(int) * N * N, hipMemcpyDeviceToHost);
hipMemcpy(dC, C, sizeof(int) * N * N, hipMemcpyDeviceToHost);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <iostream>
#include <stdlib.h>
#include <string>
#include <vector>
#include <sstream>
#include <hip/hip_runtime.h>
#include <iterator>
using namespace std;
__global__ void multiply(int *A, int *B, int *C, int N) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int i = idx / N, j = idx % N;
int sum = 0;
for (int k = 0; k < N; k++) {
sum += A[i * N + k] * B[k * N + j];
}
C[idx] = sum;
}
int main() {
string line;
int N = -1;
int *A, *B, *C, *cur;
int count = 0;
while(getline(cin, line)) {
if (N == -1) {
N = atoi(line.c_str());
A = new int[N * N];
B = new int[N * N];
C = new int[N * N];
cur = A;
} else {
vector<string> nums;
istringstream iss(line);
copy(istream_iterator<string>(iss),
istream_iterator<string>(),
back_inserter(nums));
for (int i = 0; i < nums.size(); i++) {
cur[count * N + i] = atoi(nums[i].c_str());
}
count++;
if (count == N) {
count = 0;
cur = B;
}
}
}
int *dA, *dB, *dC;
hipMalloc(&dA, sizeof(int) * N * N);
hipMalloc(&dB, sizeof(int) * N * N);
hipMalloc(&dC, sizeof(int) * N * N);
hipMemcpy(dA, A, sizeof(int) * N * N, hipMemcpyHostToDevice);
hipMemcpy(dB, B, sizeof(int) * N * N, hipMemcpyHostToDevice);
hipMemcpy(dC, C, sizeof(int) * N * N, hipMemcpyHostToDevice);
dim3 blockDim(64, 1, 1);
dim3 gridDim(N * N / blockDim.x, 1, 1);
multiply<<<gridDim, blockDim>>>(dA, dB, dC, N);
hipMemcpy(dA, A, sizeof(int) * N * N, hipMemcpyDeviceToHost);
hipMemcpy(dB, B, sizeof(int) * N * N, hipMemcpyDeviceToHost);
hipMemcpy(dC, C, sizeof(int) * N * N, hipMemcpyDeviceToHost);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8multiplyPiS_S_i
.globl _Z8multiplyPiS_S_i
.p2align 8
.type _Z8multiplyPiS_S_i,@function
_Z8multiplyPiS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_cmp_lt_i32 s2, 1
v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1]
s_mov_b32 s3, 0
s_cbranch_scc1 .LBB0_3
s_ashr_i32 s4, s2, 31
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v4, 31, v1
s_add_i32 s5, s2, s4
s_xor_b32 s4, s5, s4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v3, v1, v4
v_cvt_f32_u32_e32 v0, s4
s_sub_i32 s5, 0, s4
v_xor_b32_e32 v5, v3, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v0, v0
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v0, 0x4f7ffffe, v0
v_cvt_u32_f32_e32 v0, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v2, s5, v0
v_mul_hi_u32 v2, v0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v0, v0, v2
v_mad_u64_u32 v[2:3], null, v5, v0, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v0, v3, s4
v_sub_nc_u32_e32 v0, v5, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v2, s4, v0
v_cmp_le_u32_e32 vcc_lo, s4, v0
v_cndmask_b32_e32 v0, v0, v2, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v2, s4, v0
v_cmp_le_u32_e32 vcc_lo, s4, v0
s_load_b128 s[4:7], s[0:1], 0x0
v_cndmask_b32_e32 v0, v0, v2, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_xor_b32_e32 v0, v0, v4
v_sub_nc_u32_e32 v2, v0, v4
v_mov_b32_e32 v0, 0
s_delay_alu instid0(VALU_DEP_2)
v_sub_nc_u32_e32 v4, v1, v2
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_add_nc_u32_e32 v5, s3, v4
v_ashrrev_i32_e32 v3, 31, v2
s_add_i32 s3, s3, 1
s_cmp_eq_u32 s2, s3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v6, 31, v5
v_lshlrev_b64 v[7:8], 2, v[2:3]
v_add_nc_u32_e32 v2, s2, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[5:6], 2, v[5:6]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v7, vcc_lo, s6, v7
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v8, vcc_lo
v_add_co_u32 v5, vcc_lo, s4, v5
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v6, vcc_lo
global_load_b32 v3, v[7:8], off
global_load_b32 v7, v[5:6], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[5:6], null, v3, v7, v[0:1]
v_mov_b32_e32 v0, v5
s_cbranch_scc0 .LBB0_2
s_branch .LBB0_4
.LBB0_3:
v_mov_b32_e32 v0, 0
.LBB0_4:
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[1:2], v0, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8multiplyPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8multiplyPiS_S_i, .Lfunc_end0-_Z8multiplyPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8multiplyPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8multiplyPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z8multiplyPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ IABS R8, c[0x0][0x178] ; /* 0x00005e0000087a13 */
/* 0x000fe20000000000 */
/*0020*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ HFMA2.MMA R18, -RZ, RZ, 0, 0 ; /* 0x00000000ff127435 */
/* 0x000fe200000001ff */
/*0050*/ I2F.RP R6, R8 ; /* 0x0000000800067306 */
/* 0x000e620000209400 */
/*0060*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e2e0000002100 */
/*0070*/ MUFU.RCP R6, R6 ; /* 0x0000000600067308 */
/* 0x002e620000001000 */
/*0080*/ IMAD R2, R0, c[0x0][0x0], R3 ; /* 0x0000000000027a24 */
/* 0x001fca00078e0203 */
/*0090*/ ISETP.GE.AND P1, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe40003f26270 */
/*00a0*/ IADD3 R4, R6, 0xffffffe, RZ ; /* 0x0ffffffe06047810 */
/* 0x002fe40007ffe0ff */
/*00b0*/ IABS R6, R2 ; /* 0x0000000200067213 */
/* 0x000fe40000000000 */
/*00c0*/ F2I.FTZ.U32.TRUNC.NTZ R5, R4 ; /* 0x0000000400057305 */
/* 0x000064000021f000 */
/*00d0*/ HFMA2.MMA R4, -RZ, RZ, 0, 0 ; /* 0x00000000ff047435 */
/* 0x001fe200000001ff */
/*00e0*/ IADD3 R7, RZ, -R5, RZ ; /* 0x80000005ff077210 */
/* 0x002fca0007ffe0ff */
/*00f0*/ IMAD R7, R7, R8, RZ ; /* 0x0000000807077224 */
/* 0x000fc800078e02ff */
/*0100*/ IMAD.HI.U32 R5, R5, R7, R4 ; /* 0x0000000705057227 */
/* 0x000fe200078e0004 */
/*0110*/ MOV R4, c[0x0][0x178] ; /* 0x00005e0000047a02 */
/* 0x000fca0000000f00 */
/*0120*/ IMAD.HI.U32 R5, R5, R6, RZ ; /* 0x0000000605057227 */
/* 0x000fca00078e00ff */
/*0130*/ IADD3 R5, -R5, RZ, RZ ; /* 0x000000ff05057210 */
/* 0x000fca0007ffe1ff */
/*0140*/ IMAD R5, R8, R5, R6 ; /* 0x0000000508057224 */
/* 0x000fca00078e0206 */
/*0150*/ ISETP.GT.U32.AND P0, PT, R8, R5, PT ; /* 0x000000050800720c */
/* 0x000fda0003f04070 */
/*0160*/ @!P0 IADD3 R5, R5, -R8, RZ ; /* 0x8000000805058210 */
/* 0x000fe40007ffe0ff */
/*0170*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x178], PT ; /* 0x00005e00ff007a0c */
/* 0x000fe40003f05270 */
/*0180*/ ISETP.GT.U32.AND P2, PT, R8, R5, PT ; /* 0x000000050800720c */
/* 0x000fda0003f44070 */
/*0190*/ @!P2 IADD3 R5, R5, -R8, RZ ; /* 0x800000080505a210 */
/* 0x000fe40007ffe0ff */
/*01a0*/ ISETP.GE.AND P2, PT, R4, 0x1, PT ; /* 0x000000010400780c */
/* 0x000fe40003f46270 */
/*01b0*/ @!P1 IADD3 R5, -R5, RZ, RZ ; /* 0x000000ff05059210 */
/* 0x000fe40007ffe1ff */
/*01c0*/ @!P0 LOP3.LUT R5, RZ, c[0x0][0x178], RZ, 0x33, !PT ; /* 0x00005e00ff058a12 */
/* 0x000fd200078e33ff */
/*01d0*/ @!P2 BRA 0xd00 ; /* 0x00000b200000a947 */
/* 0x000fea0003800000 */
/*01e0*/ IADD3 R6, R4.reuse, -0x1, RZ ; /* 0xffffffff04067810 */
/* 0x040fe40007ffe0ff */
/*01f0*/ LOP3.LUT R7, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304077812 */
/* 0x000fe400078ec0ff */
/*0200*/ ISETP.GE.U32.AND P0, PT, R6, 0x3, PT ; /* 0x000000030600780c */
/* 0x000fe40003f06070 */
/*0210*/ MOV R6, RZ ; /* 0x000000ff00067202 */
/* 0x000fe40000000f00 */
/*0220*/ MOV R18, RZ ; /* 0x000000ff00127202 */
/* 0x000fd20000000f00 */
/*0230*/ @!P0 BRA 0xbe0 ; /* 0x000009a000008947 */
/* 0x000fea0003800000 */
/*0240*/ IADD3 R8, -R7, c[0x0][0x178], RZ ; /* 0x00005e0007087a10 */
/* 0x000fe20007ffe1ff */
/*0250*/ HFMA2.MMA R26, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff1a7435 */
/* 0x000fe200000001ff */
/*0260*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */
/* 0x000fe20000000a00 */
/*0270*/ IADD3 R9, R2, -R5, RZ ; /* 0x8000000502097210 */
/* 0x000fe40007ffe0ff */
/*0280*/ ISETP.GT.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f04270 */
/*0290*/ MOV R6, RZ ; /* 0x000000ff00067202 */
/* 0x000fc80000000f00 */
/*02a0*/ IMAD.WIDE R26, R5, R26, c[0x0][0x168] ; /* 0x00005a00051a7625 */
/* 0x000fce00078e021a */
/*02b0*/ @!P0 BRA 0xa50 ; /* 0x0000079000008947 */
/* 0x000fea0003800000 */
/*02c0*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fe40003f24270 */
/*02d0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*02e0*/ @!P1 BRA 0x790 ; /* 0x000004a000009947 */
/* 0x000fea0003800000 */
/*02f0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0300*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */
/* 0x000fe20008000f00 */
/*0310*/ LDG.E R19, [R26.64] ; /* 0x000000041a137981 */
/* 0x0000a2000c1e1900 */
/*0320*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */
/* 0x000fca0008000f00 */
/*0330*/ IMAD.WIDE R12, R9, 0x4, R12 ; /* 0x00000004090c7825 */
/* 0x000fca00078e020c */
/*0340*/ LDG.E R28, [R12.64] ; /* 0x000000040c1c7981 */
/* 0x000ea2000c1e1900 */
/*0350*/ IMAD.WIDE R16, R4, 0x4, R26 ; /* 0x0000000404107825 */
/* 0x000fc600078e021a */
/*0360*/ LDG.E R23, [R12.64+0x4] ; /* 0x000004040c177981 */
/* 0x000ee6000c1e1900 */
/*0370*/ IMAD.WIDE R10, R4.reuse, 0x4, R16 ; /* 0x00000004040a7825 */
/* 0x040fe200078e0210 */
/*0380*/ LDG.E R22, [R16.64] ; /* 0x0000000410167981 */
/* 0x0002e8000c1e1900 */
/*0390*/ LDG.E R25, [R10.64] ; /* 0x000000040a197981 */
/* 0x000968000c1e1900 */
/*03a0*/ LDG.E R24, [R12.64+0x8] ; /* 0x000008040c187981 */
/* 0x000f68000c1e1900 */
/*03b0*/ LDG.E R14, [R12.64+0xc] ; /* 0x00000c040c0e7981 */
/* 0x000f62000c1e1900 */
/*03c0*/ IMAD.WIDE R10, R4, 0x4, R10 ; /* 0x00000004040a7825 */
/* 0x010fc600078e020a */
/*03d0*/ LDG.E R26, [R12.64+0x10] ; /* 0x000010040c1a7981 */
/* 0x001f28000c1e1900 */
/*03e0*/ LDG.E R15, [R10.64] ; /* 0x000000040a0f7981 */
/* 0x000122000c1e1900 */
/*03f0*/ IMAD.WIDE R20, R4, 0x4, R10 ; /* 0x0000000404147825 */
/* 0x000fca00078e020a */
/*0400*/ LDG.E R27, [R20.64] ; /* 0x00000004141b7981 */
/* 0x000122000c1e1900 */
/*0410*/ IMAD.WIDE R16, R4, 0x4, R20 ; /* 0x0000000404107825 */
/* 0x002fc600078e0214 */
/*0420*/ LDG.E R10, [R12.64+0x18] ; /* 0x000018040c0a7981 */
/* 0x001f28000c1e1900 */
/*0430*/ LDG.E R21, [R12.64+0x1c] ; /* 0x00001c040c157981 */
/* 0x000f22000c1e1900 */
/*0440*/ IMAD R28, R19, R28, R18 ; /* 0x0000001c131c7224 */
/* 0x004fc600078e0212 */
/*0450*/ LDG.E R18, [R12.64+0x14] ; /* 0x000014040c127981 */
/* 0x000ea8000c1e1900 */
/*0460*/ LDG.E R19, [R16.64] ; /* 0x0000000410137981 */
/* 0x0000a2000c1e1900 */
/*0470*/ IMAD R28, R22, R23, R28 ; /* 0x00000017161c7224 */
/* 0x008fe400078e021c */
/*0480*/ IMAD.WIDE R16, R4, 0x4, R16 ; /* 0x0000000404107825 */
/* 0x001fca00078e0210 */
/*0490*/ LDG.E R11, [R16.64] ; /* 0x00000004100b7981 */
/* 0x000ae2000c1e1900 */
/*04a0*/ IMAD.WIDE R22, R4, 0x4, R16 ; /* 0x0000000404167825 */
/* 0x000fca00078e0210 */
/*04b0*/ LDG.E R20, [R22.64] ; /* 0x0000000416147981 */
/* 0x0000e2000c1e1900 */
/*04c0*/ IMAD R16, R25, R24, R28 ; /* 0x0000001819107224 */
/* 0x020fe400078e021c */
/*04d0*/ IMAD.WIDE R24, R4.reuse, 0x4, R22 ; /* 0x0000000404187825 */
/* 0x040fe200078e0216 */
/*04e0*/ LDG.E R28, [R12.64+0x20] ; /* 0x000020040c1c7981 */
/* 0x000f68000c1e1900 */
/*04f0*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */
/* 0x000362000c1e1900 */
/*0500*/ IMAD R16, R15, R14, R16 ; /* 0x0000000e0f107224 */
/* 0x010fe400078e0210 */
/*0510*/ IMAD.WIDE R14, R4, 0x4, R24 ; /* 0x00000004040e7825 */
/* 0x000fe200078e0218 */
/*0520*/ LDG.E R23, [R12.64+0x24] ; /* 0x000024040c177981 */
/* 0x001f26000c1e1900 */
/*0530*/ IMAD R26, R27, R26, R16 ; /* 0x0000001a1b1a7224 */
/* 0x000fc400078e0210 */
/*0540*/ IMAD.WIDE R16, R4, 0x4, R14 ; /* 0x0000000404107825 */
/* 0x000fe200078e020e */
/*0550*/ LDG.E R27, [R12.64+0x28] ; /* 0x000028040c1b7981 */
/* 0x000f28000c1e1900 */
/*0560*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000128000c1e1900 */
/*0570*/ LDG.E R22, [R16.64] ; /* 0x0000000410167981 */
/* 0x000328000c1e1900 */
/*0580*/ LDG.E R15, [R12.64+0x30] ; /* 0x000030040c0f7981 */
/* 0x001f22000c1e1900 */
/*0590*/ IMAD R26, R19, R18, R26 ; /* 0x00000012131a7224 */
/* 0x004fc400078e021a */
/*05a0*/ IMAD.WIDE R18, R4, 0x4, R16 ; /* 0x0000000404127825 */
/* 0x000fc800078e0210 */
/*05b0*/ IMAD R26, R11, R10, R26 ; /* 0x0000000a0b1a7224 */
/* 0x008fe400078e021a */
/*05c0*/ IMAD.WIDE R10, R4, 0x4, R18 ; /* 0x00000004040a7825 */
/* 0x000fe400078e0212 */
/*05d0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x0000a4000c1e1900 */
/*05e0*/ IMAD R24, R20, R21, R26 ; /* 0x0000001514187224 */
/* 0x002fe400078e021a */
/*05f0*/ IMAD.WIDE R20, R4, 0x4, R10 ; /* 0x0000000404147825 */
/* 0x000fe200078e020a */
/*0600*/ LDG.E R26, [R12.64+0x2c] ; /* 0x00002c040c1a7981 */
/* 0x000ea8000c1e1900 */
/*0610*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x0002e2000c1e1900 */
/*0620*/ IMAD R28, R29, R28, R24 ; /* 0x0000001c1d1c7224 */
/* 0x020fc600078e0218 */
/*0630*/ LDG.E R19, [R12.64+0x38] ; /* 0x000038040c137981 */
/* 0x001f62000c1e1900 */
/*0640*/ IMAD.WIDE R24, R4, 0x4, R20 ; /* 0x0000000404187825 */
/* 0x000fc600078e0214 */
/*0650*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000168000c1e1900 */
/*0660*/ LDG.E R11, [R12.64+0x34] ; /* 0x000034040c0b7981 */
/* 0x002f62000c1e1900 */
/*0670*/ IMAD.WIDE R16, R4, 0x4, R24 ; /* 0x0000000404107825 */
/* 0x000fc600078e0218 */
/*0680*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */
/* 0x000368000c1e1900 */
/*0690*/ LDG.E R21, [R16.64] ; /* 0x0000000410157981 */
/* 0x001f68000c1e1900 */
/*06a0*/ LDG.E R24, [R12.64+0x3c] ; /* 0x00003c040c187981 */
/* 0x002f62000c1e1900 */
/*06b0*/ IMAD R14, R14, R23, R28 ; /* 0x000000170e0e7224 */
/* 0x010fe200078e021c */
/*06c0*/ IADD3 R8, R8, -0x10, RZ ; /* 0xfffffff008087810 */
/* 0x000fc60007ffe0ff */
/*06d0*/ IMAD R27, R22, R27, R14 ; /* 0x0000001b161b7224 */
/* 0x000fe200078e020e */
/*06e0*/ ISETP.GT.AND P1, PT, R8, 0xc, PT ; /* 0x0000000c0800780c */
/* 0x000fe20003f24270 */
/*06f0*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */
/* 0x000fe2000ff1e03f */
/*0700*/ IADD3 R6, R6, 0x10, RZ ; /* 0x0000001006067810 */
/* 0x000fc60007ffe0ff */
/*0710*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0720*/ IMAD R18, R18, R26, R27 ; /* 0x0000001a12127224 */
/* 0x004fc800078e021b */
/*0730*/ IMAD R10, R10, R15, R18 ; /* 0x0000000f0a0a7224 */
/* 0x008fe400078e0212 */
/*0740*/ IMAD.WIDE R26, R4, 0x4, R16 ; /* 0x00000004041a7825 */
/* 0x000fc800078e0210 */
/*0750*/ IMAD R10, R20, R11, R10 ; /* 0x0000000b140a7224 */
/* 0x020fc800078e020a */
/*0760*/ IMAD R10, R29, R19, R10 ; /* 0x000000131d0a7224 */
/* 0x000fc800078e020a */
/*0770*/ IMAD R18, R21, R24, R10 ; /* 0x0000001815127224 */
/* 0x000fe200078e020a */
/*0780*/ @P1 BRA 0x300 ; /* 0xfffffb7000001947 */
/* 0x000fea000383ffff */
/*0790*/ ISETP.GT.AND P1, PT, R8, 0x4, PT ; /* 0x000000040800780c */
/* 0x000fda0003f24270 */
/*07a0*/ @!P1 BRA 0xa30 ; /* 0x0000028000009947 */
/* 0x000fea0003800000 */
/*07b0*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */
/* 0x000fe20008000f00 */
/*07c0*/ LDG.E R19, [R26.64] ; /* 0x000000041a137981 */
/* 0x000ea2000c1e1900 */
/*07d0*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */
/* 0x000fca0008000f00 */
/*07e0*/ IMAD.WIDE R10, R9, 0x4, R10 ; /* 0x00000004090a7825 */
/* 0x000fca00078e020a */
/*07f0*/ LDG.E R24, [R10.64] ; /* 0x000000040a187981 */
/* 0x000ea2000c1e1900 */
/*0800*/ IMAD.WIDE R22, R4, 0x4, R26 ; /* 0x0000000404167825 */
/* 0x000fc600078e021a */
/*0810*/ LDG.E R25, [R10.64+0x4] ; /* 0x000004040a197981 */
/* 0x000ee6000c1e1900 */
/*0820*/ IMAD.WIDE R14, R4.reuse, 0x4, R22 ; /* 0x00000004040e7825 */
/* 0x040fe200078e0216 */
/*0830*/ LDG.E R29, [R10.64+0x8] ; /* 0x000008040a1d7981 */
/* 0x000f28000c1e1900 */
/*0840*/ LDG.E R22, [R22.64] ; /* 0x0000000416167981 */
/* 0x0000e2000c1e1900 */
/*0850*/ IMAD.WIDE R16, R4, 0x4, R14 ; /* 0x0000000404107825 */
/* 0x000fc600078e020e */
/*0860*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000326000c1e1900 */
/*0870*/ IMAD.WIDE R12, R4.reuse, 0x4, R16 ; /* 0x00000004040c7825 */
/* 0x040fe200078e0210 */
/*0880*/ LDG.E R28, [R10.64+0xc] ; /* 0x00000c040a1c7981 */
/* 0x000f68000c1e1900 */
/*0890*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000362000c1e1900 */
/*08a0*/ IMAD.WIDE R20, R4, 0x4, R12 ; /* 0x0000000404147825 */
/* 0x000fc600078e020c */
/*08b0*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x000328000c1e1900 */
/*08c0*/ LDG.E R23, [R10.64+0x14] ; /* 0x000014040a177981 */
/* 0x001f68000c1e1900 */
/*08d0*/ LDG.E R16, [R10.64+0x18] ; /* 0x000018040a107981 */
/* 0x002f68000c1e1900 */
/*08e0*/ LDG.E R13, [R10.64+0x10] ; /* 0x000010040a0d7981 */
/* 0x000f62000c1e1900 */
/*08f0*/ IMAD R24, R19, R24, R18 ; /* 0x0000001813187224 */
/* 0x004fc400078e0212 */
/*0900*/ IMAD.WIDE R18, R4.reuse, 0x4, R20 ; /* 0x0000000404127825 */
/* 0x040fe400078e0214 */
/*0910*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000ea8000c1e1900 */
/*0920*/ IMAD.WIDE R26, R4, 0x4, R18 ; /* 0x00000004041a7825 */
/* 0x000fe200078e0212 */
/*0930*/ LDG.E R15, [R18.64] ; /* 0x00000004120f7981 */
/* 0x0000a8000c1e1900 */
/*0940*/ LDG.E R19, [R10.64+0x1c] ; /* 0x00001c040a137981 */
/* 0x001ea8000c1e1900 */
/*0950*/ LDG.E R18, [R26.64] ; /* 0x000000041a127981 */
/* 0x0000a2000c1e1900 */
/*0960*/ IMAD R22, R22, R25, R24 ; /* 0x0000001916167224 */
/* 0x008fc800078e0218 */
/*0970*/ IMAD R14, R14, R29, R22 ; /* 0x0000001d0e0e7224 */
/* 0x010fc800078e0216 */
/*0980*/ IMAD R14, R17, R28, R14 ; /* 0x0000001c110e7224 */
/* 0x020fc800078e020e */
/*0990*/ IMAD R12, R12, R13, R14 ; /* 0x0000000d0c0c7224 */
/* 0x000fe200078e020e */
/*09a0*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */
/* 0x000fe2000ff1e03f */
/*09b0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe20003f0e170 */
/*09c0*/ IMAD.WIDE R26, R4, 0x4, R26 ; /* 0x00000004041a7825 */
/* 0x001fe200078e021a */
/*09d0*/ IADD3 R6, R6, 0x8, RZ ; /* 0x0000000806067810 */
/* 0x000fe40007ffe0ff */
/*09e0*/ IADD3 R8, R8, -0x8, RZ ; /* 0xfffffff808087810 */
/* 0x000fe20007ffe0ff */
/*09f0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0a00*/ IMAD R12, R20, R23, R12 ; /* 0x00000017140c7224 */
/* 0x004fc800078e020c */
/*0a10*/ IMAD R12, R15, R16, R12 ; /* 0x000000100f0c7224 */
/* 0x000fc800078e020c */
/*0a20*/ IMAD R18, R18, R19, R12 ; /* 0x0000001312127224 */
/* 0x000fe400078e020c */
/*0a30*/ ISETP.NE.OR P0, PT, R8, RZ, P0 ; /* 0x000000ff0800720c */
/* 0x000fda0000705670 */
/*0a40*/ @!P0 BRA 0xbe0 ; /* 0x0000019000008947 */
/* 0x000fea0003800000 */
/*0a50*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */
/* 0x000fe20008000f00 */
/*0a60*/ IMAD.WIDE R12, R4, 0x4, R26 ; /* 0x00000004040c7825 */
/* 0x000fe200078e021a */
/*0a70*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */
/* 0x000fe20008000f00 */
/*0a80*/ LDG.E R27, [R26.64] ; /* 0x000000041a1b7981 */
/* 0x000ea8000c1e1900 */
/*0a90*/ IMAD.WIDE R10, R9, 0x4, R10 ; /* 0x00000004090a7825 */
/* 0x000fc800078e020a */
/*0aa0*/ IMAD.WIDE R14, R4.reuse, 0x4, R12 ; /* 0x00000004040e7825 */
/* 0x040fe200078e020c */
/*0ab0*/ LDG.E R19, [R10.64] ; /* 0x000000040a137981 */
/* 0x000ea8000c1e1900 */
/*0ac0*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x000ee2000c1e1900 */
/*0ad0*/ IMAD.WIDE R16, R4, 0x4, R14 ; /* 0x0000000404107825 */
/* 0x000fc600078e020e */
/*0ae0*/ LDG.E R20, [R10.64+0x4] ; /* 0x000004040a147981 */
/* 0x000ee8000c1e1900 */
/*0af0*/ LDG.E R22, [R14.64] ; /* 0x000000040e167981 */
/* 0x000f28000c1e1900 */
/*0b00*/ LDG.E R21, [R10.64+0x8] ; /* 0x000008040a157981 */
/* 0x000f28000c1e1900 */
/*0b10*/ LDG.E R23, [R10.64+0xc] ; /* 0x00000c040a177981 */
/* 0x000f68000c1e1900 */
/*0b20*/ LDG.E R24, [R16.64] ; /* 0x0000000410187981 */
/* 0x000f62000c1e1900 */
/*0b30*/ IADD3 R8, R8, -0x4, RZ ; /* 0xfffffffc08087810 */
/* 0x000fc80007ffe0ff */
/*0b40*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f05270 */
/*0b50*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */
/* 0x000fe2000ff1e03f */
/*0b60*/ IADD3 R6, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x000fc60007ffe0ff */
/*0b70*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0b80*/ IMAD R19, R27, R19, R18 ; /* 0x000000131b137224 */
/* 0x004fc800078e0212 */
/*0b90*/ IMAD R19, R12, R20, R19 ; /* 0x000000140c137224 */
/* 0x008fe400078e0213 */
/*0ba0*/ IMAD.WIDE R26, R4, 0x4, R16 ; /* 0x00000004041a7825 */
/* 0x000fc800078e0210 */
/*0bb0*/ IMAD R19, R22, R21, R19 ; /* 0x0000001516137224 */
/* 0x010fc800078e0213 */
/*0bc0*/ IMAD R18, R24, R23, R19 ; /* 0x0000001718127224 */
/* 0x020fe200078e0213 */
/*0bd0*/ @P0 BRA 0xa50 ; /* 0xfffffe7000000947 */
/* 0x000fea000383ffff */
/*0be0*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fda0003f05270 */
/*0bf0*/ @!P0 BRA 0xd00 ; /* 0x0000010000008947 */
/* 0x000fea0003800000 */
/*0c00*/ IADD3 R3, R3, R6, RZ ; /* 0x0000000603037210 */
/* 0x000fe20007ffe0ff */
/*0c10*/ HFMA2.MMA R10, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0a7435 */
/* 0x000fc800000001ff */
/*0c20*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x000fca00078e0203 */
/*0c30*/ IADD3 R0, -R5, R0, RZ ; /* 0x0000000005007210 */
/* 0x000fe20007ffe1ff */
/*0c40*/ IMAD R5, R6, c[0x0][0x178], R5 ; /* 0x00005e0006057a24 */
/* 0x000fc800078e0205 */
/*0c50*/ IMAD.WIDE R8, R0, R10, c[0x0][0x160] ; /* 0x0000580000087625 */
/* 0x000fc800078e020a */
/*0c60*/ IMAD.WIDE R10, R5, R10, c[0x0][0x168] ; /* 0x00005a00050a7625 */
/* 0x000fca00078e020a */
/*0c70*/ LDG.E R3, [R10.64] ; /* 0x000000040a037981 */
/* 0x0000a8000c1e1900 */
/*0c80*/ LDG.E R0, [R8.64] ; /* 0x0000000408007981 */
/* 0x0002a2000c1e1900 */
/*0c90*/ IADD3 R7, R7, -0x1, RZ ; /* 0xffffffff07077810 */
/* 0x000fc80007ffe0ff */
/*0ca0*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe20003f05270 */
/*0cb0*/ IMAD.WIDE R10, R4, 0x4, R10 ; /* 0x00000004040a7825 */
/* 0x001fe200078e020a */
/*0cc0*/ IADD3 R8, P1, R8, 0x4, RZ ; /* 0x0000000408087810 */
/* 0x002fc80007f3e0ff */
/*0cd0*/ IADD3.X R9, RZ, R9, RZ, P1, !PT ; /* 0x00000009ff097210 */
/* 0x000fe20000ffe4ff */
/*0ce0*/ IMAD R18, R3, R0, R18 ; /* 0x0000000003127224 */
/* 0x004fcc00078e0212 */
/*0cf0*/ @P0 BRA 0xc70 ; /* 0xffffff7000000947 */
/* 0x000fea000383ffff */
/*0d00*/ MOV R3, 0x4 ; /* 0x0000000400037802 */
/* 0x000fca0000000f00 */
/*0d10*/ IMAD.WIDE R2, R2, R3, c[0x0][0x170] ; /* 0x00005c0002027625 */
/* 0x000fca00078e0203 */
/*0d20*/ STG.E [R2.64], R18 ; /* 0x0000001202007986 */
/* 0x000fe2000c101904 */
/*0d30*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0d40*/ BRA 0xd40; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0d50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0da0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0db0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0dc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0dd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0de0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0df0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8multiplyPiS_S_i
.globl _Z8multiplyPiS_S_i
.p2align 8
.type _Z8multiplyPiS_S_i,@function
_Z8multiplyPiS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_cmp_lt_i32 s2, 1
v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1]
s_mov_b32 s3, 0
s_cbranch_scc1 .LBB0_3
s_ashr_i32 s4, s2, 31
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_ashrrev_i32_e32 v4, 31, v1
s_add_i32 s5, s2, s4
s_xor_b32 s4, s5, s4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v3, v1, v4
v_cvt_f32_u32_e32 v0, s4
s_sub_i32 s5, 0, s4
v_xor_b32_e32 v5, v3, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v0, v0
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v0, 0x4f7ffffe, v0
v_cvt_u32_f32_e32 v0, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v2, s5, v0
v_mul_hi_u32 v2, v0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v0, v0, v2
v_mad_u64_u32 v[2:3], null, v5, v0, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v0, v3, s4
v_sub_nc_u32_e32 v0, v5, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v2, s4, v0
v_cmp_le_u32_e32 vcc_lo, s4, v0
v_cndmask_b32_e32 v0, v0, v2, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v2, s4, v0
v_cmp_le_u32_e32 vcc_lo, s4, v0
s_load_b128 s[4:7], s[0:1], 0x0
v_cndmask_b32_e32 v0, v0, v2, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_xor_b32_e32 v0, v0, v4
v_sub_nc_u32_e32 v2, v0, v4
v_mov_b32_e32 v0, 0
s_delay_alu instid0(VALU_DEP_2)
v_sub_nc_u32_e32 v4, v1, v2
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_add_nc_u32_e32 v5, s3, v4
v_ashrrev_i32_e32 v3, 31, v2
s_add_i32 s3, s3, 1
s_cmp_eq_u32 s2, s3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v6, 31, v5
v_lshlrev_b64 v[7:8], 2, v[2:3]
v_add_nc_u32_e32 v2, s2, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[5:6], 2, v[5:6]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v7, vcc_lo, s6, v7
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v8, vcc_lo
v_add_co_u32 v5, vcc_lo, s4, v5
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v6, vcc_lo
global_load_b32 v3, v[7:8], off
global_load_b32 v7, v[5:6], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[5:6], null, v3, v7, v[0:1]
v_mov_b32_e32 v0, v5
s_cbranch_scc0 .LBB0_2
s_branch .LBB0_4
.LBB0_3:
v_mov_b32_e32 v0, 0
.LBB0_4:
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[1:2], v0, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8multiplyPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8multiplyPiS_S_i, .Lfunc_end0-_Z8multiplyPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8multiplyPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8multiplyPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
// Per-pixel damped update of an interleaved 2-component float field.
// z and z0 store two floats per pixel: component 0 at z[2*idx] and
// component 1 at z[2*idx+1], row-major over an nx-by-ny grid, so both
// buffers must hold at least 2*nx*ny floats.
// Each thread computes t = 1 / (1 + tau*|z_idx|) from the pixel's
// Euclidean norm and rescales (z0 + tau*z) by t for both components.
// NOTE(review): looks like a proximal/relaxation step from an iterative
// solver (e.g. a TV primal-dual scheme) — confirm against the caller.
// Expects a 2-D launch covering at least nx x ny threads.
__global__ void zupdate(float *z, float *z0, float tau, int nx, int ny)
{
// Global 2-D pixel coordinates from block/thread indices.
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Row-major linear pixel index.
int idx = x + y*nx;
// Bounds guard: the launch grid may overshoot the image extents.
if (x<nx && y<ny)
{
float a = z[2 * idx + 0];
float b = z[2 * idx + 1];
// Shrink factor from the vector magnitude; int 1 promotes to float,
// so this is single-precision division throughout.
float t = 1 / (1 + tau*sqrtf(a*a + b*b));
z[2 * idx + 0] = (z0[2 * idx + 0] + tau*z[2 * idx + 0])*t;
z[2 * idx + 1] = (z0[2 * idx + 1] + tau*z[2 * idx + 1])*t;
}
} | code for sm_80
Function : _Z7zupdatePfS_fii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x178], PT ; /* 0x00005e0003007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x174], P0 ; /* 0x00005d0000007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IMAD R5, R3, c[0x0][0x174], R0 ; /* 0x00005d0003057a24 */
/* 0x000fe200078e0200 */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fc600078e00ff */
/*00d0*/ SHF.L.U32 R5, R5, 0x1, RZ ; /* 0x0000000105057819 */
/* 0x000fca00000006ff */
/*00e0*/ IMAD.WIDE R2, R5, R2, c[0x0][0x160] ; /* 0x0000580005027625 */
/* 0x000fca00078e0202 */
/*00f0*/ LDG.E R0, [R2.64+0x4] ; /* 0x0000040402007981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R4, [R2.64] ; /* 0x0000000402047981 */
/* 0x000ee2000c1e1900 */
/*0110*/ BSSY B0, 0x210 ; /* 0x000000f000007945 */
/* 0x000fe20003800000 */
/*0120*/ FMUL R7, R0, R0 ; /* 0x0000000000077220 */
/* 0x004fc80000400000 */
/*0130*/ FFMA R7, R4, R4, R7 ; /* 0x0000000404077223 */
/* 0x008fc80000000007 */
/*0140*/ MUFU.RSQ R6, R7 ; /* 0x0000000700067308 */
/* 0x0000620000001400 */
/*0150*/ IADD3 R8, R7, -0xd000000, RZ ; /* 0xf300000007087810 */
/* 0x000fc80007ffe0ff */
/*0160*/ ISETP.GT.U32.AND P0, PT, R8, 0x727fffff, PT ; /* 0x727fffff0800780c */
/* 0x000fda0003f04070 */
/*0170*/ @!P0 BRA 0x1c0 ; /* 0x0000004000008947 */
/* 0x000fea0003800000 */
/*0180*/ MOV R12, 0x1a0 ; /* 0x000001a0000c7802 */
/* 0x003fe40000000f00 */
/*0190*/ CALL.REL.NOINC 0x700 ; /* 0x0000056000007944 */
/* 0x000fea0003c00000 */
/*01a0*/ IMAD.MOV.U32 R6, RZ, RZ, R8 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0008 */
/*01b0*/ BRA 0x200 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*01c0*/ FMUL.FTZ R8, R7, R6 ; /* 0x0000000607087220 */
/* 0x003fe40000410000 */
/*01d0*/ FMUL.FTZ R6, R6, 0.5 ; /* 0x3f00000006067820 */
/* 0x000fe40000410000 */
/*01e0*/ FFMA R7, -R8, R8, R7 ; /* 0x0000000808077223 */
/* 0x000fc80000000107 */
/*01f0*/ FFMA R6, R7, R6, R8 ; /* 0x0000000607067223 */
/* 0x000fe40000000008 */
/*0200*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0210*/ MOV R7, c[0x0][0x170] ; /* 0x00005c0000077a02 */
/* 0x000fe20000000f00 */
/*0220*/ BSSY B0, 0x300 ; /* 0x000000d000007945 */
/* 0x000fe80003800000 */
/*0230*/ FFMA R9, R6, R7, 1 ; /* 0x3f80000006097423 */
/* 0x000fca0000000007 */
/*0240*/ IADD3 R6, R9, 0x1800000, RZ ; /* 0x0180000009067810 */
/* 0x000fc80007ffe0ff */
/*0250*/ LOP3.LUT R6, R6, 0x7f800000, RZ, 0xc0, !PT ; /* 0x7f80000006067812 */
/* 0x000fc800078ec0ff */
/*0260*/ ISETP.GT.U32.AND P0, PT, R6, 0x1ffffff, PT ; /* 0x01ffffff0600780c */
/* 0x000fda0003f04070 */
/*0270*/ @P0 BRA 0x2b0 ; /* 0x0000003000000947 */
/* 0x000fea0003800000 */
/*0280*/ MOV R6, 0x2a0 ; /* 0x000002a000067802 */
/* 0x000fe40000000f00 */
/*0290*/ CALL.REL.NOINC 0x3b0 ; /* 0x0000011000007944 */
/* 0x000fea0003c00000 */
/*02a0*/ BRA 0x2f0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*02b0*/ MUFU.RCP R8, R9 ; /* 0x0000000900087308 */
/* 0x000e240000001000 */
/*02c0*/ FFMA R6, R9, R8, -1 ; /* 0xbf80000009067423 */
/* 0x001fc80000000008 */
/*02d0*/ FADD.FTZ R7, -R6, -RZ ; /* 0x800000ff06077221 */
/* 0x000fc80000010100 */
/*02e0*/ FFMA R8, R8, R7, R8 ; /* 0x0000000708087223 */
/* 0x000fe40000000008 */
/*02f0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0300*/ IMAD.MOV.U32 R6, RZ, RZ, 0x4 ; /* 0x00000004ff067424 */
/* 0x000fc800078e00ff */
/*0310*/ IMAD.WIDE R6, R5, R6, c[0x0][0x168] ; /* 0x00005a0005067625 */
/* 0x000fca00078e0206 */
/*0320*/ LDG.E R5, [R6.64] ; /* 0x0000000406057981 */
/* 0x000ea4000c1e1900 */
/*0330*/ FFMA R5, R4, c[0x0][0x170], R5 ; /* 0x00005c0004057a23 */
/* 0x004fc80000000005 */
/*0340*/ FMUL R5, R5, R8 ; /* 0x0000000805057220 */
/* 0x002fca0000400000 */
/*0350*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe8000c101904 */
/*0360*/ LDG.E R9, [R6.64+0x4] ; /* 0x0000040406097981 */
/* 0x000ea4000c1e1900 */
/*0370*/ FFMA R9, R0, c[0x0][0x170], R9 ; /* 0x00005c0000097a23 */
/* 0x004fc80000000009 */
/*0380*/ FMUL R9, R9, R8 ; /* 0x0000000809097220 */
/* 0x000fca0000400000 */
/*0390*/ STG.E [R2.64+0x4], R9 ; /* 0x0000040902007986 */
/* 0x000fe2000c101904 */
/*03a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03b0*/ SHF.L.U32 R7, R9, 0x1, RZ ; /* 0x0000000109077819 */
/* 0x000fe200000006ff */
/*03c0*/ BSSY B1, 0x6e0 ; /* 0x0000031000017945 */
/* 0x000fe60003800000 */
/*03d0*/ SHF.R.U32.HI R13, RZ, 0x18, R7 ; /* 0x00000018ff0d7819 */
/* 0x000fe20000011607 */
/*03e0*/ IMAD.MOV.U32 R7, RZ, RZ, R9 ; /* 0x000000ffff077224 */
/* 0x000fc600078e0009 */
/*03f0*/ ISETP.NE.U32.AND P0, PT, R13, RZ, PT ; /* 0x000000ff0d00720c */
/* 0x000fda0003f05070 */
/*0400*/ @P0 BRA 0x4b0 ; /* 0x000000a000000947 */
/* 0x000fea0003800000 */
/*0410*/ SHF.L.U32 R8, R7, 0x1, RZ ; /* 0x0000000107087819 */
/* 0x000fc800000006ff */
/*0420*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fda0003f05270 */
/*0430*/ @P0 FFMA R9, R7, 1.84467440737095516160e+19, RZ ; /* 0x5f80000007090823 */
/* 0x000fe200000000ff */
/*0440*/ @!P0 MUFU.RCP R8, R7 ; /* 0x0000000700088308 */
/* 0x000ff00000001000 */
/*0450*/ @P0 MUFU.RCP R10, R9 ; /* 0x00000009000a0308 */
/* 0x000e240000001000 */
/*0460*/ @P0 FFMA R11, R9, R10, -1 ; /* 0xbf800000090b0423 */
/* 0x001fc8000000000a */
/*0470*/ @P0 FADD.FTZ R11, -R11, -RZ ; /* 0x800000ff0b0b0221 */
/* 0x000fc80000010100 */
/*0480*/ @P0 FFMA R11, R10, R11, R10 ; /* 0x0000000b0a0b0223 */
/* 0x000fc8000000000a */
/*0490*/ @P0 FFMA R8, R11, 1.84467440737095516160e+19, RZ ; /* 0x5f8000000b080823 */
/* 0x000fe200000000ff */
/*04a0*/ BRA 0x6d0 ; /* 0x0000022000007947 */
/* 0x000fea0003800000 */
/*04b0*/ IADD3 R15, R13, -0xfd, RZ ; /* 0xffffff030d0f7810 */
/* 0x000fc80007ffe0ff */
/*04c0*/ ISETP.GT.U32.AND P0, PT, R15, 0x1, PT ; /* 0x000000010f00780c */
/* 0x000fda0003f04070 */
/*04d0*/ @P0 BRA 0x6c0 ; /* 0x000001e000000947 */
/* 0x000fea0003800000 */
/*04e0*/ LOP3.LUT R8, R7, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff07087812 */
/* 0x000fe200078ec0ff */
/*04f0*/ IMAD.MOV.U32 R12, RZ, RZ, 0x3 ; /* 0x00000003ff0c7424 */
/* 0x000fc600078e00ff */
/*0500*/ LOP3.LUT R8, R8, 0x3f800000, RZ, 0xfc, !PT ; /* 0x3f80000008087812 */
/* 0x000fe400078efcff */
/*0510*/ SHF.L.U32 R12, R12, R15, RZ ; /* 0x0000000f0c0c7219 */
/* 0x000fe400000006ff */
/*0520*/ MUFU.RCP R9, R8 ; /* 0x0000000800097308 */
/* 0x000e240000001000 */
/*0530*/ FFMA R10, R8, R9, -1 ; /* 0xbf800000080a7423 */
/* 0x001fc80000000009 */
/*0540*/ FADD.FTZ R10, -R10, -RZ ; /* 0x800000ff0a0a7221 */
/* 0x000fc80000010100 */
/*0550*/ FFMA.RM R11, R9.reuse, R10.reuse, R9.reuse ; /* 0x0000000a090b7223 */
/* 0x1c0fe40000004009 */
/*0560*/ FFMA.RP R10, R9, R10, R9 ; /* 0x0000000a090a7223 */
/* 0x000fc60000008009 */
/*0570*/ LOP3.LUT R9, R11.reuse, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff0b097812 */
/* 0x040fe400078ec0ff */
/*0580*/ FSETP.NEU.FTZ.AND P0, PT, R11, R10, PT ; /* 0x0000000a0b00720b */
/* 0x000fe40003f1d000 */
/*0590*/ LOP3.LUT R9, R9, 0x800000, RZ, 0xfc, !PT ; /* 0x0080000009097812 */
/* 0x000fe400078efcff */
/*05a0*/ SEL R10, RZ, 0xffffffff, !P0 ; /* 0xffffffffff0a7807 */
/* 0x000fe40004000000 */
/*05b0*/ LOP3.LUT R12, R12, R9, RZ, 0xc0, !PT ; /* 0x000000090c0c7212 */
/* 0x000fe400078ec0ff */
/*05c0*/ IADD3 R10, -R10, RZ, RZ ; /* 0x000000ff0a0a7210 */
/* 0x000fc40007ffe1ff */
/*05d0*/ SHF.R.U32.HI R12, RZ, R15.reuse, R12 ; /* 0x0000000fff0c7219 */
/* 0x080fe4000001160c */
/*05e0*/ LOP3.LUT P1, RZ, R10, R15, R9, 0xf8, !PT ; /* 0x0000000f0aff7212 */
/* 0x000fe4000782f809 */
/*05f0*/ LOP3.LUT P0, RZ, R12.reuse, 0x1, RZ, 0xc0, !PT ; /* 0x000000010cff7812 */
/* 0x040fe4000780c0ff */
/*0600*/ LOP3.LUT P2, RZ, R12, 0x2, RZ, 0xc0, !PT ; /* 0x000000020cff7812 */
/* 0x000fc8000784c0ff */
/*0610*/ PLOP3.LUT P0, PT, P0, P1, P2, 0xe0, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40000703c20 */
/*0620*/ LOP3.LUT P1, RZ, R7, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff07ff7812 */
/* 0x000fe4000782c0ff */
/*0630*/ SEL R8, RZ, 0x1, !P0 ; /* 0x00000001ff087807 */
/* 0x000fca0004000000 */
/*0640*/ IMAD.MOV R8, RZ, RZ, -R8 ; /* 0x000000ffff087224 */
/* 0x000fca00078e0a08 */
/*0650*/ ISETP.GE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f06270 */
/*0660*/ IADD3 R8, R13, -0xfc, RZ ; /* 0xffffff040d087810 */
/* 0x000fc80007ffe0ff */
/*0670*/ SHF.R.U32.HI R8, RZ, R8, R9 ; /* 0x00000008ff087219 */
/* 0x000fce0000011609 */
/*0680*/ @!P0 IADD3 R8, R8, 0x1, RZ ; /* 0x0000000108088810 */
/* 0x000fc80007ffe0ff */
/*0690*/ @!P1 SHF.L.U32 R8, R8, 0x1, RZ ; /* 0x0000000108089819 */
/* 0x000fc800000006ff */
/*06a0*/ LOP3.LUT R8, R8, 0x80000000, R7, 0xf8, !PT ; /* 0x8000000008087812 */
/* 0x000fe200078ef807 */
/*06b0*/ BRA 0x6d0 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*06c0*/ MUFU.RCP R8, R7 ; /* 0x0000000700087308 */
/* 0x0000640000001000 */
/*06d0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*06e0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x0 ; /* 0x00000000ff077424 */
/* 0x001fc800078e00ff */
/*06f0*/ RET.REL.NODEC R6 0x0 ; /* 0xfffff90006007950 */
/* 0x000fea0003c3ffff */
/*0700*/ LOP3.LUT P0, RZ, R7, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff07ff7812 */
/* 0x000fda000780c0ff */
/*0710*/ @!P0 MOV R6, R7 ; /* 0x0000000700068202 */
/* 0x000fe20000000f00 */
/*0720*/ @!P0 BRA 0x830 ; /* 0x0000010000008947 */
/* 0x000fea0003800000 */
/*0730*/ FSETP.GEU.FTZ.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720b */
/* 0x000fda0003f1e000 */
/*0740*/ @!P0 IMAD.MOV.U32 R6, RZ, RZ, 0x7fffffff ; /* 0x7fffffffff068424 */
/* 0x000fe200078e00ff */
/*0750*/ @!P0 BRA 0x830 ; /* 0x000000d000008947 */
/* 0x000fea0003800000 */
/*0760*/ FSETP.GTU.FTZ.AND P0, PT, |R7|, +INF , PT ; /* 0x7f8000000700780b */
/* 0x000fda0003f1c200 */
/*0770*/ @P0 FADD.FTZ R6, R7, 1 ; /* 0x3f80000007060421 */
/* 0x000fe20000010000 */
/*0780*/ @P0 BRA 0x830 ; /* 0x000000a000000947 */
/* 0x000fea0003800000 */
/*0790*/ FSETP.NEU.FTZ.AND P0, PT, |R7|, +INF , PT ; /* 0x7f8000000700780b */
/* 0x000fda0003f1d200 */
/*07a0*/ @P0 FFMA R8, R7, 1.84467440737095516160e+19, RZ ; /* 0x5f80000007080823 */
/* 0x000fc800000000ff */
/*07b0*/ @P0 MUFU.RSQ R9, R8 ; /* 0x0000000800090308 */
/* 0x000e240000001400 */
/*07c0*/ @P0 FMUL.FTZ R11, R8, R9 ; /* 0x00000009080b0220 */
/* 0x001fe40000410000 */
/*07d0*/ @P0 FMUL.FTZ R9, R9, 0.5 ; /* 0x3f00000009090820 */
/* 0x000fe40000410000 */
/*07e0*/ @P0 FADD.FTZ R6, -R11, -RZ ; /* 0x800000ff0b060221 */
/* 0x000fc80000010100 */
/*07f0*/ @P0 FFMA R10, R11, R6, R8 ; /* 0x000000060b0a0223 */
/* 0x000fe20000000008 */
/*0800*/ @!P0 MOV R6, R7 ; /* 0x0000000700068202 */
/* 0x000fc60000000f00 */
/*0810*/ @P0 FFMA R9, R10, R9, R11 ; /* 0x000000090a090223 */
/* 0x000fc8000000000b */
/*0820*/ @P0 FMUL.FTZ R6, R9, 2.3283064365386962891e-10 ; /* 0x2f80000009060820 */
/* 0x000fc80000410000 */
/*0830*/ IMAD.MOV.U32 R8, RZ, RZ, R6 ; /* 0x000000ffff087224 */
/* 0x000fe200078e0006 */
/*0840*/ MOV R6, R12 ; /* 0x0000000c00067202 */
/* 0x000fe20000000f00 */
/*0850*/ IMAD.MOV.U32 R7, RZ, RZ, 0x0 ; /* 0x00000000ff077424 */
/* 0x000fc800078e00ff */
/*0860*/ RET.REL.NODEC R6 0x0 ; /* 0xfffff79006007950 */
/* 0x000fea0003c3ffff */
/*0870*/ BRA 0x870; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0880*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0890*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
// Per-pixel damped update of an interleaved 2-component float field.
// z and z0 hold two floats per pixel (z[2*idx], z[2*idx+1]) in row-major
// order over an nx-by-ny grid; both must be sized >= 2*nx*ny floats.
// Each in-bounds thread rescales (z0 + tau*z) by t = 1/(1 + tau*|z_idx|),
// where |z_idx| is the per-pixel Euclidean norm of the two components.
// NOTE(review): presumably one relaxation step of an iterative solver —
// verify against the host-side loop.
__global__ void zupdate(float *z, float *z0, float tau, int nx, int ny)
{
// 2-D global thread coordinates.
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Row-major pixel index.
int idx = x + y*nx;
// Guard: grid dimensions may exceed nx x ny.
if (x<nx && y<ny)
{
float a = z[2 * idx + 0];
float b = z[2 * idx + 1];
// Single-precision shrink factor (int 1 promotes to float).
float t = 1 / (1 + tau*sqrtf(a*a + b*b));
z[2 * idx + 0] = (z0[2 * idx + 0] + tau*z[2 * idx + 0])*t;
z[2 * idx + 1] = (z0[2 * idx + 1] + tau*z[2 * idx + 1])*t;
}
} | .file	"tmpxft_0008a45d_00000000-6_zupdate.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z7zupdatePfS_fiiPfS_fii
.type _Z31__device_stub__Z7zupdatePfS_fiiPfS_fii, @function
_Z31__device_stub__Z7zupdatePfS_fiiPfS_fii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movss %xmm0, 12(%rsp)
movl %edx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7zupdatePfS_fii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z31__device_stub__Z7zupdatePfS_fiiPfS_fii, .-_Z31__device_stub__Z7zupdatePfS_fiiPfS_fii
.globl _Z7zupdatePfS_fii
.type _Z7zupdatePfS_fii, @function
_Z7zupdatePfS_fii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z7zupdatePfS_fiiPfS_fii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z7zupdatePfS_fii, .-_Z7zupdatePfS_fii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z7zupdatePfS_fii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z7zupdatePfS_fii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
// Per-pixel damped update of an interleaved 2-component float field.
// Layout: component 0 at z[2*idx], component 1 at z[2*idx+1], row-major
// over nx x ny pixels (so z and z0 each need >= 2*nx*ny floats).
// Writes back (z0 + tau*z) * t with t = 1/(1 + tau*sqrt(a*a + b*b)),
// i.e. a magnitude-dependent shrink of the relaxed value.
// NOTE(review): appears to be a solver relaxation/prox step — confirm.
__global__ void zupdate(float *z, float *z0, float tau, int nx, int ny)
{
// Global 2-D coordinates of this thread's pixel.
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Linear row-major index.
int idx = x + y*nx;
// Skip threads outside the image.
if (x<nx && y<ny)
{
float a = z[2 * idx + 0];
float b = z[2 * idx + 1];
// All-float arithmetic: the int literal 1 promotes to float.
float t = 1 / (1 + tau*sqrtf(a*a + b*b));
z[2 * idx + 0] = (z0[2 * idx + 0] + tau*z[2 * idx + 0])*t;
z[2 * idx + 1] = (z0[2 * idx + 1] + tau*z[2 * idx + 1])*t;
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
// HIP port of the CUDA zupdate kernel (source-identical: HIP exposes the
// same blockIdx/blockDim/threadIdx built-ins and sqrtf).
// Damped per-pixel update of an interleaved 2-component float field:
// components at z[2*idx] and z[2*idx+1], row-major over nx x ny pixels;
// z and z0 must each hold >= 2*nx*ny floats.
__global__ void zupdate(float *z, float *z0, float tau, int nx, int ny)
{
// 2-D global pixel coordinates.
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Row-major linear index.
int idx = x + y*nx;
// Bounds guard for partial blocks at the image edge.
if (x<nx && y<ny)
{
float a = z[2 * idx + 0];
float b = z[2 * idx + 1];
// Shrink factor from the pixel's Euclidean norm; float division.
float t = 1 / (1 + tau*sqrtf(a*a + b*b));
z[2 * idx + 0] = (z0[2 * idx + 0] + tau*z[2 * idx + 0])*t;
z[2 * idx + 1] = (z0[2 * idx + 1] + tau*z[2 * idx + 1])*t;
}
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
// Damped update of an interleaved 2-component float field, one thread
// per pixel of an nx-by-ny row-major grid. Components live at z[2*idx]
// and z[2*idx+1]; z and z0 must each be >= 2*nx*ny floats.
// Each in-bounds pixel is rescaled: z <- (z0 + tau*z) * t with
// t = 1 / (1 + tau * sqrt(a*a + b*b)).
// NOTE(review): presumably a solver relaxation step — confirm with caller.
__global__ void zupdate(float *z, float *z0, float tau, int nx, int ny)
{
// Global 2-D thread coordinates.
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Row-major pixel index.
int idx = x + y*nx;
// Guard against grid overshoot.
if (x<nx && y<ny)
{
float a = z[2 * idx + 0];
float b = z[2 * idx + 1];
// Single-precision throughout (int 1 promotes to float).
float t = 1 / (1 + tau*sqrtf(a*a + b*b));
z[2 * idx + 0] = (z0[2 * idx + 0] + tau*z[2 * idx + 0])*t;
z[2 * idx + 1] = (z0[2 * idx + 1] + tau*z[2 * idx + 1])*t;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7zupdatePfS_fii
.globl _Z7zupdatePfS_fii
.p2align 8
.type _Z7zupdatePfS_fii,@function
_Z7zupdatePfS_fii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b64 s[4:5], s[0:1], 0x14
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
v_cmp_gt_i32_e32 vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s5, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
v_mul_lo_u32 v1, v1, s4
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b32 s1, s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_lshl_u32 v0, v1, v0, 1
v_or_b32_e32 v2, 1, v0
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s6, v0
s_clause 0x1
global_load_b32 v8, v[4:5], off
global_load_b32 v9, v[6:7], off
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_load_b32 v0, v[0:1], off
s_waitcnt vmcnt(2)
v_mul_f32_e32 v1, v8, v8
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_fmac_f32_e32 v1, v9, v9
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v0, s1, v9
v_mul_f32_e32 v10, 0x4f800000, v1
v_cmp_gt_f32_e32 vcc_lo, 0xf800000, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v1, v1, v10, vcc_lo
v_sqrt_f32_e32 v10, v1
s_waitcnt_depctr 0xfff
v_add_nc_u32_e32 v11, -1, v10
v_add_nc_u32_e32 v12, 1, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f32 v13, -v11, v10, v1
v_fma_f32 v14, -v12, v10, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_ge_f32_e64 s0, 0, v13
v_cndmask_b32_e64 v10, v10, v11, s0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_lt_f32_e64 s0, 0, v14
v_cndmask_b32_e64 v10, v10, v12, s0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v11, 0x37800000, v10
v_cndmask_b32_e32 v10, v10, v11, vcc_lo
v_cmp_class_f32_e64 vcc_lo, v1, 0x260
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v1, v10, v1, vcc_lo
v_fma_f32 v1, v1, s1, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_scale_f32 v10, null, v1, v1, 1.0
v_div_scale_f32 v13, vcc_lo, 1.0, v1, 1.0
v_rcp_f32_e32 v11, v10
s_waitcnt_depctr 0xfff
v_fma_f32 v12, -v10, v11, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v11, v12, v11
v_mul_f32_e32 v12, v13, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v14, -v10, v12, v13
v_fmac_f32_e32 v12, v14, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v10, -v10, v12, v13
v_div_fmas_f32 v10, v10, v11, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fixup_f32 v9, v10, v1, 1.0
v_mul_f32_e32 v10, v0, v9
v_add_co_u32 v0, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v3, vcc_lo
global_store_b32 v[6:7], v10, off
global_load_b32 v0, v[0:1], off
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v0, s1, v8
s_delay_alu instid0(VALU_DEP_1)
v_mul_f32_e32 v0, v0, v9
global_store_b32 v[4:5], v0, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7zupdatePfS_fii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 15
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7zupdatePfS_fii, .Lfunc_end0-_Z7zupdatePfS_fii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7zupdatePfS_fii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7zupdatePfS_fii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 15
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
// Pointwise update of an interleaved 2-component field z (layout: z[2*i],
// z[2*i+1] per grid point) toward a reference field z0 with step tau, scaled
// by t = 1 / (1 + tau*|z_i|), which shrinks entries of large magnitude.
// NOTE(review): this looks like the dual-variable update from primal-dual
// TV-regularization solvers — confirm against the calling algorithm.
// Launch: 2D grid of 2D blocks covering an nx-by-ny domain; threads outside
// the domain exit via the bounds check below.
__global__ void zupdate(float *z, float *z0, float tau, int nx, int ny)
{
// global 2D coordinates of this thread
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int idx = x + y*nx; // row-major linear index of the grid point
if (x<nx && y<ny) // guard against partial blocks at the domain edge
{
// current pair at this grid point
float a = z[2 * idx + 0];
float b = z[2 * idx + 1];
// int literal 1 promotes to float; sqrtf keeps the math single precision
float t = 1 / (1 + tau*sqrtf(a*a + b*b));
// z[2*idx+0/1] still hold a and b here, so these re-reads are
// equivalent to using the cached values above
z[2 * idx + 0] = (z0[2 * idx + 0] + tau*z[2 * idx + 0])*t;
z[2 * idx + 1] = (z0[2 * idx + 1] + tau*z[2 * idx + 1])*t;
}
} | .text
.file "zupdate.hip"
.globl _Z22__device_stub__zupdatePfS_fii # -- Begin function _Z22__device_stub__zupdatePfS_fii
.p2align 4, 0x90
.type _Z22__device_stub__zupdatePfS_fii,@function
_Z22__device_stub__zupdatePfS_fii: # @_Z22__device_stub__zupdatePfS_fii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movss %xmm0, 12(%rsp)
movl %edx, 8(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7zupdatePfS_fii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z22__device_stub__zupdatePfS_fii, .Lfunc_end0-_Z22__device_stub__zupdatePfS_fii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7zupdatePfS_fii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7zupdatePfS_fii,@object # @_Z7zupdatePfS_fii
.section .rodata,"a",@progbits
.globl _Z7zupdatePfS_fii
.p2align 3, 0x0
_Z7zupdatePfS_fii:
.quad _Z22__device_stub__zupdatePfS_fii
.size _Z7zupdatePfS_fii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7zupdatePfS_fii"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__zupdatePfS_fii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7zupdatePfS_fii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z7zupdatePfS_fii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e680000002500 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x178], PT ; /* 0x00005e0003007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x174], P0 ; /* 0x00005d0000007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IMAD R5, R3, c[0x0][0x174], R0 ; /* 0x00005d0003057a24 */
/* 0x000fe200078e0200 */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fc600078e00ff */
/*00d0*/ SHF.L.U32 R5, R5, 0x1, RZ ; /* 0x0000000105057819 */
/* 0x000fca00000006ff */
/*00e0*/ IMAD.WIDE R2, R5, R2, c[0x0][0x160] ; /* 0x0000580005027625 */
/* 0x000fca00078e0202 */
/*00f0*/ LDG.E R0, [R2.64+0x4] ; /* 0x0000040402007981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R4, [R2.64] ; /* 0x0000000402047981 */
/* 0x000ee2000c1e1900 */
/*0110*/ BSSY B0, 0x210 ; /* 0x000000f000007945 */
/* 0x000fe20003800000 */
/*0120*/ FMUL R7, R0, R0 ; /* 0x0000000000077220 */
/* 0x004fc80000400000 */
/*0130*/ FFMA R7, R4, R4, R7 ; /* 0x0000000404077223 */
/* 0x008fc80000000007 */
/*0140*/ MUFU.RSQ R6, R7 ; /* 0x0000000700067308 */
/* 0x0000620000001400 */
/*0150*/ IADD3 R8, R7, -0xd000000, RZ ; /* 0xf300000007087810 */
/* 0x000fc80007ffe0ff */
/*0160*/ ISETP.GT.U32.AND P0, PT, R8, 0x727fffff, PT ; /* 0x727fffff0800780c */
/* 0x000fda0003f04070 */
/*0170*/ @!P0 BRA 0x1c0 ; /* 0x0000004000008947 */
/* 0x000fea0003800000 */
/*0180*/ MOV R12, 0x1a0 ; /* 0x000001a0000c7802 */
/* 0x003fe40000000f00 */
/*0190*/ CALL.REL.NOINC 0x700 ; /* 0x0000056000007944 */
/* 0x000fea0003c00000 */
/*01a0*/ IMAD.MOV.U32 R6, RZ, RZ, R8 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0008 */
/*01b0*/ BRA 0x200 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*01c0*/ FMUL.FTZ R8, R7, R6 ; /* 0x0000000607087220 */
/* 0x003fe40000410000 */
/*01d0*/ FMUL.FTZ R6, R6, 0.5 ; /* 0x3f00000006067820 */
/* 0x000fe40000410000 */
/*01e0*/ FFMA R7, -R8, R8, R7 ; /* 0x0000000808077223 */
/* 0x000fc80000000107 */
/*01f0*/ FFMA R6, R7, R6, R8 ; /* 0x0000000607067223 */
/* 0x000fe40000000008 */
/*0200*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0210*/ MOV R7, c[0x0][0x170] ; /* 0x00005c0000077a02 */
/* 0x000fe20000000f00 */
/*0220*/ BSSY B0, 0x300 ; /* 0x000000d000007945 */
/* 0x000fe80003800000 */
/*0230*/ FFMA R9, R6, R7, 1 ; /* 0x3f80000006097423 */
/* 0x000fca0000000007 */
/*0240*/ IADD3 R6, R9, 0x1800000, RZ ; /* 0x0180000009067810 */
/* 0x000fc80007ffe0ff */
/*0250*/ LOP3.LUT R6, R6, 0x7f800000, RZ, 0xc0, !PT ; /* 0x7f80000006067812 */
/* 0x000fc800078ec0ff */
/*0260*/ ISETP.GT.U32.AND P0, PT, R6, 0x1ffffff, PT ; /* 0x01ffffff0600780c */
/* 0x000fda0003f04070 */
/*0270*/ @P0 BRA 0x2b0 ; /* 0x0000003000000947 */
/* 0x000fea0003800000 */
/*0280*/ MOV R6, 0x2a0 ; /* 0x000002a000067802 */
/* 0x000fe40000000f00 */
/*0290*/ CALL.REL.NOINC 0x3b0 ; /* 0x0000011000007944 */
/* 0x000fea0003c00000 */
/*02a0*/ BRA 0x2f0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*02b0*/ MUFU.RCP R8, R9 ; /* 0x0000000900087308 */
/* 0x000e240000001000 */
/*02c0*/ FFMA R6, R9, R8, -1 ; /* 0xbf80000009067423 */
/* 0x001fc80000000008 */
/*02d0*/ FADD.FTZ R7, -R6, -RZ ; /* 0x800000ff06077221 */
/* 0x000fc80000010100 */
/*02e0*/ FFMA R8, R8, R7, R8 ; /* 0x0000000708087223 */
/* 0x000fe40000000008 */
/*02f0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0300*/ IMAD.MOV.U32 R6, RZ, RZ, 0x4 ; /* 0x00000004ff067424 */
/* 0x000fc800078e00ff */
/*0310*/ IMAD.WIDE R6, R5, R6, c[0x0][0x168] ; /* 0x00005a0005067625 */
/* 0x000fca00078e0206 */
/*0320*/ LDG.E R5, [R6.64] ; /* 0x0000000406057981 */
/* 0x000ea4000c1e1900 */
/*0330*/ FFMA R5, R4, c[0x0][0x170], R5 ; /* 0x00005c0004057a23 */
/* 0x004fc80000000005 */
/*0340*/ FMUL R5, R5, R8 ; /* 0x0000000805057220 */
/* 0x002fca0000400000 */
/*0350*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe8000c101904 */
/*0360*/ LDG.E R9, [R6.64+0x4] ; /* 0x0000040406097981 */
/* 0x000ea4000c1e1900 */
/*0370*/ FFMA R9, R0, c[0x0][0x170], R9 ; /* 0x00005c0000097a23 */
/* 0x004fc80000000009 */
/*0380*/ FMUL R9, R9, R8 ; /* 0x0000000809097220 */
/* 0x000fca0000400000 */
/*0390*/ STG.E [R2.64+0x4], R9 ; /* 0x0000040902007986 */
/* 0x000fe2000c101904 */
/*03a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03b0*/ SHF.L.U32 R7, R9, 0x1, RZ ; /* 0x0000000109077819 */
/* 0x000fe200000006ff */
/*03c0*/ BSSY B1, 0x6e0 ; /* 0x0000031000017945 */
/* 0x000fe60003800000 */
/*03d0*/ SHF.R.U32.HI R13, RZ, 0x18, R7 ; /* 0x00000018ff0d7819 */
/* 0x000fe20000011607 */
/*03e0*/ IMAD.MOV.U32 R7, RZ, RZ, R9 ; /* 0x000000ffff077224 */
/* 0x000fc600078e0009 */
/*03f0*/ ISETP.NE.U32.AND P0, PT, R13, RZ, PT ; /* 0x000000ff0d00720c */
/* 0x000fda0003f05070 */
/*0400*/ @P0 BRA 0x4b0 ; /* 0x000000a000000947 */
/* 0x000fea0003800000 */
/*0410*/ SHF.L.U32 R8, R7, 0x1, RZ ; /* 0x0000000107087819 */
/* 0x000fc800000006ff */
/*0420*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fda0003f05270 */
/*0430*/ @P0 FFMA R9, R7, 1.84467440737095516160e+19, RZ ; /* 0x5f80000007090823 */
/* 0x000fe200000000ff */
/*0440*/ @!P0 MUFU.RCP R8, R7 ; /* 0x0000000700088308 */
/* 0x000ff00000001000 */
/*0450*/ @P0 MUFU.RCP R10, R9 ; /* 0x00000009000a0308 */
/* 0x000e240000001000 */
/*0460*/ @P0 FFMA R11, R9, R10, -1 ; /* 0xbf800000090b0423 */
/* 0x001fc8000000000a */
/*0470*/ @P0 FADD.FTZ R11, -R11, -RZ ; /* 0x800000ff0b0b0221 */
/* 0x000fc80000010100 */
/*0480*/ @P0 FFMA R11, R10, R11, R10 ; /* 0x0000000b0a0b0223 */
/* 0x000fc8000000000a */
/*0490*/ @P0 FFMA R8, R11, 1.84467440737095516160e+19, RZ ; /* 0x5f8000000b080823 */
/* 0x000fe200000000ff */
/*04a0*/ BRA 0x6d0 ; /* 0x0000022000007947 */
/* 0x000fea0003800000 */
/*04b0*/ IADD3 R15, R13, -0xfd, RZ ; /* 0xffffff030d0f7810 */
/* 0x000fc80007ffe0ff */
/*04c0*/ ISETP.GT.U32.AND P0, PT, R15, 0x1, PT ; /* 0x000000010f00780c */
/* 0x000fda0003f04070 */
/*04d0*/ @P0 BRA 0x6c0 ; /* 0x000001e000000947 */
/* 0x000fea0003800000 */
/*04e0*/ LOP3.LUT R8, R7, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff07087812 */
/* 0x000fe200078ec0ff */
/*04f0*/ IMAD.MOV.U32 R12, RZ, RZ, 0x3 ; /* 0x00000003ff0c7424 */
/* 0x000fc600078e00ff */
/*0500*/ LOP3.LUT R8, R8, 0x3f800000, RZ, 0xfc, !PT ; /* 0x3f80000008087812 */
/* 0x000fe400078efcff */
/*0510*/ SHF.L.U32 R12, R12, R15, RZ ; /* 0x0000000f0c0c7219 */
/* 0x000fe400000006ff */
/*0520*/ MUFU.RCP R9, R8 ; /* 0x0000000800097308 */
/* 0x000e240000001000 */
/*0530*/ FFMA R10, R8, R9, -1 ; /* 0xbf800000080a7423 */
/* 0x001fc80000000009 */
/*0540*/ FADD.FTZ R10, -R10, -RZ ; /* 0x800000ff0a0a7221 */
/* 0x000fc80000010100 */
/*0550*/ FFMA.RM R11, R9.reuse, R10.reuse, R9.reuse ; /* 0x0000000a090b7223 */
/* 0x1c0fe40000004009 */
/*0560*/ FFMA.RP R10, R9, R10, R9 ; /* 0x0000000a090a7223 */
/* 0x000fc60000008009 */
/*0570*/ LOP3.LUT R9, R11.reuse, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff0b097812 */
/* 0x040fe400078ec0ff */
/*0580*/ FSETP.NEU.FTZ.AND P0, PT, R11, R10, PT ; /* 0x0000000a0b00720b */
/* 0x000fe40003f1d000 */
/*0590*/ LOP3.LUT R9, R9, 0x800000, RZ, 0xfc, !PT ; /* 0x0080000009097812 */
/* 0x000fe400078efcff */
/*05a0*/ SEL R10, RZ, 0xffffffff, !P0 ; /* 0xffffffffff0a7807 */
/* 0x000fe40004000000 */
/*05b0*/ LOP3.LUT R12, R12, R9, RZ, 0xc0, !PT ; /* 0x000000090c0c7212 */
/* 0x000fe400078ec0ff */
/*05c0*/ IADD3 R10, -R10, RZ, RZ ; /* 0x000000ff0a0a7210 */
/* 0x000fc40007ffe1ff */
/*05d0*/ SHF.R.U32.HI R12, RZ, R15.reuse, R12 ; /* 0x0000000fff0c7219 */
/* 0x080fe4000001160c */
/*05e0*/ LOP3.LUT P1, RZ, R10, R15, R9, 0xf8, !PT ; /* 0x0000000f0aff7212 */
/* 0x000fe4000782f809 */
/*05f0*/ LOP3.LUT P0, RZ, R12.reuse, 0x1, RZ, 0xc0, !PT ; /* 0x000000010cff7812 */
/* 0x040fe4000780c0ff */
/*0600*/ LOP3.LUT P2, RZ, R12, 0x2, RZ, 0xc0, !PT ; /* 0x000000020cff7812 */
/* 0x000fc8000784c0ff */
/*0610*/ PLOP3.LUT P0, PT, P0, P1, P2, 0xe0, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40000703c20 */
/*0620*/ LOP3.LUT P1, RZ, R7, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff07ff7812 */
/* 0x000fe4000782c0ff */
/*0630*/ SEL R8, RZ, 0x1, !P0 ; /* 0x00000001ff087807 */
/* 0x000fca0004000000 */
/*0640*/ IMAD.MOV R8, RZ, RZ, -R8 ; /* 0x000000ffff087224 */
/* 0x000fca00078e0a08 */
/*0650*/ ISETP.GE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f06270 */
/*0660*/ IADD3 R8, R13, -0xfc, RZ ; /* 0xffffff040d087810 */
/* 0x000fc80007ffe0ff */
/*0670*/ SHF.R.U32.HI R8, RZ, R8, R9 ; /* 0x00000008ff087219 */
/* 0x000fce0000011609 */
/*0680*/ @!P0 IADD3 R8, R8, 0x1, RZ ; /* 0x0000000108088810 */
/* 0x000fc80007ffe0ff */
/*0690*/ @!P1 SHF.L.U32 R8, R8, 0x1, RZ ; /* 0x0000000108089819 */
/* 0x000fc800000006ff */
/*06a0*/ LOP3.LUT R8, R8, 0x80000000, R7, 0xf8, !PT ; /* 0x8000000008087812 */
/* 0x000fe200078ef807 */
/*06b0*/ BRA 0x6d0 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*06c0*/ MUFU.RCP R8, R7 ; /* 0x0000000700087308 */
/* 0x0000640000001000 */
/*06d0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*06e0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x0 ; /* 0x00000000ff077424 */
/* 0x001fc800078e00ff */
/*06f0*/ RET.REL.NODEC R6 0x0 ; /* 0xfffff90006007950 */
/* 0x000fea0003c3ffff */
/*0700*/ LOP3.LUT P0, RZ, R7, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff07ff7812 */
/* 0x000fda000780c0ff */
/*0710*/ @!P0 MOV R6, R7 ; /* 0x0000000700068202 */
/* 0x000fe20000000f00 */
/*0720*/ @!P0 BRA 0x830 ; /* 0x0000010000008947 */
/* 0x000fea0003800000 */
/*0730*/ FSETP.GEU.FTZ.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720b */
/* 0x000fda0003f1e000 */
/*0740*/ @!P0 IMAD.MOV.U32 R6, RZ, RZ, 0x7fffffff ; /* 0x7fffffffff068424 */
/* 0x000fe200078e00ff */
/*0750*/ @!P0 BRA 0x830 ; /* 0x000000d000008947 */
/* 0x000fea0003800000 */
/*0760*/ FSETP.GTU.FTZ.AND P0, PT, |R7|, +INF , PT ; /* 0x7f8000000700780b */
/* 0x000fda0003f1c200 */
/*0770*/ @P0 FADD.FTZ R6, R7, 1 ; /* 0x3f80000007060421 */
/* 0x000fe20000010000 */
/*0780*/ @P0 BRA 0x830 ; /* 0x000000a000000947 */
/* 0x000fea0003800000 */
/*0790*/ FSETP.NEU.FTZ.AND P0, PT, |R7|, +INF , PT ; /* 0x7f8000000700780b */
/* 0x000fda0003f1d200 */
/*07a0*/ @P0 FFMA R8, R7, 1.84467440737095516160e+19, RZ ; /* 0x5f80000007080823 */
/* 0x000fc800000000ff */
/*07b0*/ @P0 MUFU.RSQ R9, R8 ; /* 0x0000000800090308 */
/* 0x000e240000001400 */
/*07c0*/ @P0 FMUL.FTZ R11, R8, R9 ; /* 0x00000009080b0220 */
/* 0x001fe40000410000 */
/*07d0*/ @P0 FMUL.FTZ R9, R9, 0.5 ; /* 0x3f00000009090820 */
/* 0x000fe40000410000 */
/*07e0*/ @P0 FADD.FTZ R6, -R11, -RZ ; /* 0x800000ff0b060221 */
/* 0x000fc80000010100 */
/*07f0*/ @P0 FFMA R10, R11, R6, R8 ; /* 0x000000060b0a0223 */
/* 0x000fe20000000008 */
/*0800*/ @!P0 MOV R6, R7 ; /* 0x0000000700068202 */
/* 0x000fc60000000f00 */
/*0810*/ @P0 FFMA R9, R10, R9, R11 ; /* 0x000000090a090223 */
/* 0x000fc8000000000b */
/*0820*/ @P0 FMUL.FTZ R6, R9, 2.3283064365386962891e-10 ; /* 0x2f80000009060820 */
/* 0x000fc80000410000 */
/*0830*/ IMAD.MOV.U32 R8, RZ, RZ, R6 ; /* 0x000000ffff087224 */
/* 0x000fe200078e0006 */
/*0840*/ MOV R6, R12 ; /* 0x0000000c00067202 */
/* 0x000fe20000000f00 */
/*0850*/ IMAD.MOV.U32 R7, RZ, RZ, 0x0 ; /* 0x00000000ff077424 */
/* 0x000fc800078e00ff */
/*0860*/ RET.REL.NODEC R6 0x0 ; /* 0xfffff79006007950 */
/* 0x000fea0003c3ffff */
/*0870*/ BRA 0x870; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0880*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0890*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7zupdatePfS_fii
.globl _Z7zupdatePfS_fii
.p2align 8
.type _Z7zupdatePfS_fii,@function
_Z7zupdatePfS_fii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b64 s[4:5], s[0:1], 0x14
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s2, v[3:4]
v_cmp_gt_i32_e32 vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s5, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
v_mul_lo_u32 v1, v1, s4
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b32 s1, s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_lshl_u32 v0, v1, v0, 1
v_or_b32_e32 v2, 1, v0
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v7, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s6, v0
s_clause 0x1
global_load_b32 v8, v[4:5], off
global_load_b32 v9, v[6:7], off
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v1, vcc_lo
global_load_b32 v0, v[0:1], off
s_waitcnt vmcnt(2)
v_mul_f32_e32 v1, v8, v8
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_fmac_f32_e32 v1, v9, v9
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v0, s1, v9
v_mul_f32_e32 v10, 0x4f800000, v1
v_cmp_gt_f32_e32 vcc_lo, 0xf800000, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v1, v1, v10, vcc_lo
v_sqrt_f32_e32 v10, v1
s_waitcnt_depctr 0xfff
v_add_nc_u32_e32 v11, -1, v10
v_add_nc_u32_e32 v12, 1, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f32 v13, -v11, v10, v1
v_fma_f32 v14, -v12, v10, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_ge_f32_e64 s0, 0, v13
v_cndmask_b32_e64 v10, v10, v11, s0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_lt_f32_e64 s0, 0, v14
v_cndmask_b32_e64 v10, v10, v12, s0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v11, 0x37800000, v10
v_cndmask_b32_e32 v10, v10, v11, vcc_lo
v_cmp_class_f32_e64 vcc_lo, v1, 0x260
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v1, v10, v1, vcc_lo
v_fma_f32 v1, v1, s1, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_scale_f32 v10, null, v1, v1, 1.0
v_div_scale_f32 v13, vcc_lo, 1.0, v1, 1.0
v_rcp_f32_e32 v11, v10
s_waitcnt_depctr 0xfff
v_fma_f32 v12, -v10, v11, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v11, v12, v11
v_mul_f32_e32 v12, v13, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v14, -v10, v12, v13
v_fmac_f32_e32 v12, v14, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v10, -v10, v12, v13
v_div_fmas_f32 v10, v10, v11, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fixup_f32 v9, v10, v1, 1.0
v_mul_f32_e32 v10, v0, v9
v_add_co_u32 v0, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v3, vcc_lo
global_store_b32 v[6:7], v10, off
global_load_b32 v0, v[0:1], off
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v0, s1, v8
s_delay_alu instid0(VALU_DEP_1)
v_mul_f32_e32 v0, v0, v9
global_store_b32 v[4:5], v0, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7zupdatePfS_fii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 15
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7zupdatePfS_fii, .Lfunc_end0-_Z7zupdatePfS_fii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7zupdatePfS_fii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7zupdatePfS_fii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 15
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0008a45d_00000000-6_zupdate.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z7zupdatePfS_fiiPfS_fii
.type _Z31__device_stub__Z7zupdatePfS_fiiPfS_fii, @function
_Z31__device_stub__Z7zupdatePfS_fiiPfS_fii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movss %xmm0, 12(%rsp)
movl %edx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7zupdatePfS_fii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z31__device_stub__Z7zupdatePfS_fiiPfS_fii, .-_Z31__device_stub__Z7zupdatePfS_fiiPfS_fii
.globl _Z7zupdatePfS_fii
.type _Z7zupdatePfS_fii, @function
_Z7zupdatePfS_fii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z7zupdatePfS_fiiPfS_fii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z7zupdatePfS_fii, .-_Z7zupdatePfS_fii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z7zupdatePfS_fii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z7zupdatePfS_fii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "zupdate.hip"
.globl _Z22__device_stub__zupdatePfS_fii # -- Begin function _Z22__device_stub__zupdatePfS_fii
.p2align 4, 0x90
.type _Z22__device_stub__zupdatePfS_fii,@function
_Z22__device_stub__zupdatePfS_fii: # @_Z22__device_stub__zupdatePfS_fii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movss %xmm0, 12(%rsp)
movl %edx, 8(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7zupdatePfS_fii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z22__device_stub__zupdatePfS_fii, .Lfunc_end0-_Z22__device_stub__zupdatePfS_fii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7zupdatePfS_fii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7zupdatePfS_fii,@object # @_Z7zupdatePfS_fii
.section .rodata,"a",@progbits
.globl _Z7zupdatePfS_fii
.p2align 3, 0x0
_Z7zupdatePfS_fii:
.quad _Z22__device_stub__zupdatePfS_fii
.size _Z7zupdatePfS_fii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7zupdatePfS_fii"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__zupdatePfS_fii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7zupdatePfS_fii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
* Module to test CUDA module loading and execution.
* To be compiled with:
* nvcc -ptx module_test.cu
*/
#ifdef __cplusplus
extern "C" {
#endif
/// Sets the first N elements of array to value.
__global__ void testMemset(float* array, float value, int N){
int i = ( blockIdx.y*gridDim.x + blockIdx.x ) * blockDim.x + threadIdx.x;
if(i < N){
array[i] = value;
}
}
#ifdef __cplusplus
}
#endif | code for sm_80
Function : testMemset
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002600 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0030*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0040*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */
/* 0x001fc800078e0203 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x16c], PT ; /* 0x00005b0000007a0c */
/* 0x000fda0003f06270 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0090*/ MOV R5, c[0x0][0x168] ; /* 0x00005a0000057a02 */
/* 0x000fe20000000f00 */
/*00a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00b0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e0203 */
/*00c0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
* Module to test CUDA module loading and execution.
* To be compiled with:
* nvcc -ptx module_test.cu
*/
#ifdef __cplusplus
extern "C" {
#endif
/// Sets the first N elements of array to value.
__global__ void testMemset(float* array, float value, int N){
int i = ( blockIdx.y*gridDim.x + blockIdx.x ) * blockDim.x + threadIdx.x;
if(i < N){
array[i] = value;
}
}
#ifdef __cplusplus
}
#endif | .file "tmpxft_0011bfb4_00000000-6_module_test.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z10testMemsetPffiPffi
.type _Z32__device_stub__Z10testMemsetPffiPffi, @function
_Z32__device_stub__Z10testMemsetPffiPffi:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movss %xmm0, 4(%rsp)
movl %esi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq testMemset(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z32__device_stub__Z10testMemsetPffiPffi, .-_Z32__device_stub__Z10testMemsetPffiPffi
.globl testMemset
.type testMemset, @function
testMemset:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z10testMemsetPffiPffi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size testMemset, .-testMemset
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "testMemset"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq testMemset(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
* Module to test CUDA module loading and execution.
* To be compiled with:
* nvcc -ptx module_test.cu
*/
#ifdef __cplusplus
extern "C" {
#endif
/// Sets the first N elements of array to value.
__global__ void testMemset(float* array, float value, int N){
int i = ( blockIdx.y*gridDim.x + blockIdx.x ) * blockDim.x + threadIdx.x;
if(i < N){
array[i] = value;
}
}
#ifdef __cplusplus
}
#endif | #include <hip/hip_runtime.h>
/*
* Module to test CUDA module loading and execution.
* To be compiled with:
* nvcc -ptx module_test.cu
*/
#ifdef __cplusplus
extern "C" {
#endif
/// Sets the first N elements of array to value.
__global__ void testMemset(float* array, float value, int N){
int i = ( blockIdx.y*gridDim.x + blockIdx.x ) * blockDim.x + threadIdx.x;
if(i < N){
array[i] = value;
}
}
#ifdef __cplusplus
}
#endif |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
/*
* Module to test CUDA module loading and execution.
* To be compiled with:
* nvcc -ptx module_test.cu
*/
#ifdef __cplusplus
extern "C" {
#endif
/// Sets the first N elements of array to value.
__global__ void testMemset(float* array, float value, int N){
int i = ( blockIdx.y*gridDim.x + blockIdx.x ) * blockDim.x + threadIdx.x;
if(i < N){
array[i] = value;
}
}
#ifdef __cplusplus
}
#endif | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected testMemset
.globl testMemset
.p2align 8
.type testMemset,@function
testMemset:
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0xc
s_load_b32 s4, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_mul_i32 s3, s3, s15
s_and_b32 s4, s4, 0xffff
s_add_i32 s3, s3, s14
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s3, s4, v[0:1]
v_cmp_gt_i32_e32 vcc_lo, s2, v1
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_2
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b32 s0, s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
v_mov_b32_e32 v2, s0
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel testMemset
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size testMemset, .Lfunc_end0-testMemset
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: testMemset
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: testMemset.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
/*
* Module to test CUDA module loading and execution.
* To be compiled with:
* nvcc -ptx module_test.cu
*/
#ifdef __cplusplus
extern "C" {
#endif
/// Sets the first N elements of array to value.
__global__ void testMemset(float* array, float value, int N){
int i = ( blockIdx.y*gridDim.x + blockIdx.x ) * blockDim.x + threadIdx.x;
if(i < N){
array[i] = value;
}
}
#ifdef __cplusplus
}
#endif | .text
.file "module_test.hip"
.globl __device_stub__testMemset # -- Begin function __device_stub__testMemset
.p2align 4, 0x90
.type __device_stub__testMemset,@function
__device_stub__testMemset: # @__device_stub__testMemset
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movss %xmm0, 4(%rsp)
movl %esi, (%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
movq %rsp, %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $testMemset, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size __device_stub__testMemset, .Lfunc_end0-__device_stub__testMemset
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $testMemset, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type testMemset,@object # @testMemset
.section .rodata,"a",@progbits
.globl testMemset
.p2align 3, 0x0
testMemset:
.quad __device_stub__testMemset
.size testMemset, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "testMemset"
.size .L__unnamed_1, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__testMemset
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym testMemset
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : testMemset
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002600 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0030*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0040*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */
/* 0x001fc800078e0203 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x16c], PT ; /* 0x00005b0000007a0c */
/* 0x000fda0003f06270 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0090*/ MOV R5, c[0x0][0x168] ; /* 0x00005a0000057a02 */
/* 0x000fe20000000f00 */
/*00a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00b0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e0203 */
/*00c0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected testMemset
.globl testMemset
.p2align 8
.type testMemset,@function
testMemset:
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0xc
s_load_b32 s4, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_mul_i32 s3, s3, s15
s_and_b32 s4, s4, 0xffff
s_add_i32 s3, s3, s14
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s3, s4, v[0:1]
v_cmp_gt_i32_e32 vcc_lo, s2, v1
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_2
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b32 s0, s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
v_mov_b32_e32 v2, s0
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel testMemset
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size testMemset, .Lfunc_end0-testMemset
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: testMemset
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: testMemset.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0011bfb4_00000000-6_module_test.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z32__device_stub__Z10testMemsetPffiPffi
.type _Z32__device_stub__Z10testMemsetPffiPffi, @function
_Z32__device_stub__Z10testMemsetPffiPffi:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movss %xmm0, 4(%rsp)
movl %esi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq testMemset(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z32__device_stub__Z10testMemsetPffiPffi, .-_Z32__device_stub__Z10testMemsetPffiPffi
.globl testMemset
.type testMemset, @function
testMemset:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z10testMemsetPffiPffi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size testMemset, .-testMemset
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "testMemset"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq testMemset(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "module_test.hip"
.globl __device_stub__testMemset # -- Begin function __device_stub__testMemset
.p2align 4, 0x90
.type __device_stub__testMemset,@function
__device_stub__testMemset: # @__device_stub__testMemset
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movss %xmm0, 4(%rsp)
movl %esi, (%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
movq %rsp, %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $testMemset, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size __device_stub__testMemset, .Lfunc_end0-__device_stub__testMemset
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $testMemset, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type testMemset,@object # @testMemset
.section .rodata,"a",@progbits
.globl testMemset
.p2align 3, 0x0
testMemset:
.quad __device_stub__testMemset
.size testMemset, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "testMemset"
.size .L__unnamed_1, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__testMemset
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym testMemset
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*********************************************************
* DESCRIPTION: *
* Serial Concurrent Wave Equation - C Version *
* This program implements the concurrent wave equation*
**********************************************************/
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#define MAXPOINTS 1000000
#define MINPOINTS 20
#define MAXSTEPS 1000000
#define PI 3.14159265
void check_param(void);
void init_line(void);
void update(void);
void printfinal(void);
int nsteps, /*number of time steps*/
tpoints, /*total points along string*/
rcode; /*generic return code*/
float values[MAXPOINTS + 2], /*values at time t*/
oldval[MAXPOINTS + 2], /*values at time (t-dt)*/
newval[MAXPOINTS + 2]; /*values at time (t+dt)*/
float v[MAXPOINTS + 2], /*serial used for comparing answers with parallel*/
o[MAXPOINTS + 2],
n[MAXPOINTS + 2];
/*********************************************************
* Check input values from parameters *
**********************************************************/
void check_param(void){
char tchar[20];
/*check number of points, number of iterations*/
while((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)){
printf("Enter number of points along vibrating string [%d-%d]: ");
scanf("%s", tchar);
tpoints = atoi(tchar);
if((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)){
printf("Invalid. Please enter value between %d and %d\n", MINPOINTS, MAXPOINTS);
}
}
while((nsteps < 1) || (nsteps > MAXSTEPS)){
printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
scanf("%s", tchar);
nsteps = atoi(tchar);
if((nsteps < 1) || (nsteps > MAXSTEPS)){
printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
}
}
printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/*********************************************************
* Initialize points on line *
**********************************************************/
void init_line(void){
int i, j;
float x, fac, k, tmp;
/*Calculate initial values based on sine curve*/
fac = 2.0 * PI;
k = 0.0;
tmp = tpoints - 1;
for(j = 1; j <= tpoints; j++){
x = k/tmp;
values[j] = sin(fac * x);
v[j] = values[j];
k = k + 1.0;
}
/*Initialize old values array*/
for(i = 1; i <= tpoints; i++){
oldval[i] = values[i];
o[i] = v[i];
}
}
/*********************************************************
* Calculate new values using wave equation *
**********************************************************/
__global__ void do_math_kernel(float *val_d, float *old_d, float *new_d, int num_of_steps, int num_of_points){
float dtime, c, dx, tau, sqtau;
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
__shared__ float val_ds[1024];
__shared__ float old_ds[1024];
__shared__ float new_ds[1024];
int tx = threadIdx.x;
int index = blockIdx.x * blockDim.x + threadIdx.x;
val_ds[tx] = val_d[index];
old_ds[tx] = old_d[index];
for(int i = 1; i <= num_of_steps; i++){
/*This part needs to access elements from global memory of GPU*/
/*if(index == 0 || index == num_of_points - 1) new_d[index] = 0.0;
else new_d[index] = (2.0 * val_d[index]) - old_d[index] + (sqtau * (-2.0) * val_d[index]);
old_d[index] = val_d[index];
val_d[index] = new_d[index];*/
/*This part accesses elements from shared memory of GPU -> faster*/
/*if(index == 0 || index == num_of_points - 1) new_ds[tx] = 0.0;
else new_ds[tx] = (2.0 * val_ds[tx]) - old_ds[tx] + (sqtau * (-2.0) * val_ds[tx]);
old_ds[tx] = val_ds[tx];
val_ds[tx] = new_ds[tx];*/
/*This part only takes values[2~tpoints-1] total tpoins-2 points from CPU to GPU in order to reduce branch overhead*/
new_ds[tx] = (2.0 * val_ds[tx]) - old_ds[tx] + (sqtau * (-2.0) * val_ds[tx]);
old_ds[tx] = val_ds[tx];
val_ds[tx] = new_ds[tx];
}
__syncthreads();
val_d[index] = val_ds[tx];
}
void do_math(int i){
float dtime, c, dx, tau, sqtau;
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
newval[i] = (2.0 * values[i]) - oldval[i] + (sqtau * (-2.0) * values[i]);
//n[i] = (2.0 * v[i]) - o[i] + (sqtau * (-2.0) * v[i]);
}
/***********************************************************
* Update all values along line a specified number of times*
***********************************************************/
void updateOnDevice(){
//int size = tpoints * sizeof(float);
int size = (tpoints - 2) * sizeof(float);
float *val_d, *old_d, *new_d; //memory on device
/*1.Allocate device memory and move initiail values[] and oldval[] to GPU*/
cudaMalloc(&val_d, size);
//cudaMemcpy(val_d, values+1, size, cudaMemcpyHostToDevice);
cudaMemcpy(val_d, values+2, size, cudaMemcpyHostToDevice);
cudaMalloc(&old_d, size);
//cudaMemcpy(old_d, oldval+1, size, cudaMemcpyHostToDevice);
cudaMemcpy(old_d, oldval+2, size, cudaMemcpyHostToDevice);
cudaMalloc(&new_d, size);
/*2.Invoke kernel function, each thread calculates a value[] element*/
int threads_per_block, blocks_per_grid = tpoints/1024 + 1;
if(tpoints > 1024) threads_per_block = 1024;
else threads_per_block = tpoints;
dim3 dimBlock(threads_per_block);
dim3 dimGrid(blocks_per_grid);
do_math_kernel<<<dimGrid,dimBlock>>>(val_d, old_d, new_d, nsteps, tpoints);
/*3.Read final results from GPU to CPU*/
//cudaMemcpy(values+1, val_d, size, cudaMemcpyDeviceToHost);
cudaMemcpy(values+2, val_d, size, cudaMemcpyDeviceToHost);
cudaFree(val_d); cudaFree(old_d); cudaFree(new_d);
}
void update(){
int i, j;
/*Update values for each time step*/
for(i = 1; i <= nsteps; i++){
/*Update points along line for this time step*/
for(j = 1; j <= tpoints; j++){
/*global endpoints*/
if((j == 1) || (j == tpoints)){
newval[j] = 0.0;
//n[j] = 0.0;
}
else do_math(j);
}
/*Update old values with new values*/
for(j = 1; j <= tpoints; j++){
oldval[j] = values[j];
values[j] = newval[j];
/*o[j] = v[j];
v[j] = n[j];*/
}
}
}
/**********************************************************
* Print final results *
**********************************************************/
void printfinal(){
int i;
for(i = 1; i <= tpoints; i++){
printf("%6.4f ", values[i]);
if(i%10 == 0) printf("\n");
}
}
/**********************************************************
* Check serial and parallel answers *
**********************************************************/
void check_answer(){
int wrong = 0, num = 0;
for(int i = 1; i <= tpoints; i++){
if(values[i]!=v[i]){
wrong = 1;
num++;
}
}
if(wrong == 0) printf("right\n");
else printf("%d are wrong\n", num);
/*In command line ./ cuda_wave.out > [filename] to pipe output to the file
then use diff file1 file2 to see if there is any difference*/
}
/*********************************************************
* Main Program *
**********************************************************/
int main(int argc, char *argv[]){
sscanf(argv[1], "%d", &tpoints);
sscanf(argv[2], "%d", &nsteps);
check_param();
printf("Initializing points on the line...\n");
init_line();
//printfinal();
printf("Updating all points for all time steps...\n");
//update();
updateOnDevice();
printf("Printing final results...\n");
printfinal();
printf("\nDone.\n\n");
return 0;
} | code for sm_80
Function : _Z14do_math_kernelPfS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R3, c[0x0][0x0], R0 ; /* 0x0000000003027a24 */
/* 0x001fc800078e0200 */
/*0060*/ IMAD.WIDE R4, R2, R7, c[0x0][0x168] ; /* 0x00005a0002047625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R2, R2, R7, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fe400078e0207 */
/*0080*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x000ee2000c1e1900 */
/*00a0*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff067624 */
/* 0x000fca00078e00ff */
/*00b0*/ ISETP.GE.AND P0, PT, R6, 0x1, PT ; /* 0x000000010600780c */
/* 0x000fe20003f06270 */
/*00c0*/ STS [R0.X4+0x1000], R5 ; /* 0x0010000500007388 */
/* 0x0041e80000004800 */
/*00d0*/ STS [R0.X4], R7 ; /* 0x0000000700007388 */
/* 0x0081f00000004800 */
/*00e0*/ @!P0 BRA 0x3c0 ; /* 0x000002d000008947 */
/* 0x000fea0003800000 */
/*00f0*/ IADD3 R8, R6.reuse, -0x1, RZ ; /* 0xffffffff06087810 */
/* 0x040fe20007ffe0ff */
/*0100*/ IMAD.MOV.U32 R17, RZ, RZ, R7 ; /* 0x000000ffff117224 */
/* 0x000fe200078e0007 */
/*0110*/ LOP3.LUT R4, R6, 0x3, RZ, 0xc0, !PT ; /* 0x0000000306047812 */
/* 0x000fe200078ec0ff */
/*0120*/ IMAD.MOV.U32 R15, RZ, RZ, R5 ; /* 0x000000ffff0f7224 */
/* 0x000fe200078e0005 */
/*0130*/ ISETP.GE.U32.AND P1, PT, R8, 0x3, PT ; /* 0x000000030800780c */
/* 0x000fc40003f26070 */
/*0140*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fd60003f05270 */
/*0150*/ @!P1 BRA 0x2f0 ; /* 0x0000019000009947 */
/* 0x000fea0003800000 */
/*0160*/ IADD3 R5, -R4, c[0x0][0x178], RZ ; /* 0x00005e0004057a10 */
/* 0x001fc60007ffe1ff */
/*0170*/ F2F.F64.F32 R6, R17 ; /* 0x0000001100067310 */
/* 0x003e220000201800 */
/*0180*/ IADD3 R5, R5, -0x4, RZ ; /* 0xfffffffc05057810 */
/* 0x000fc80007ffe0ff */
/*0190*/ ISETP.NE.AND P1, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fc60003f25270 */
/*01a0*/ F2F.F64.F32 R10, R15 ; /* 0x0000000f000a7310 */
/* 0x000e620000201800 */
/*01b0*/ DADD R8, R6, R6 ; /* 0x0000000006087229 */
/* 0x001e4c0000000006 */
/*01c0*/ DADD R8, R8, -R10 ; /* 0x0000000008087229 */
/* 0x002e0c000000080a */
/*01d0*/ DFMA R8, R6, c[0x2][0x0], R8 ; /* 0x0080000006087a2b */
/* 0x001e0c0000000008 */
/*01e0*/ F2F.F32.F64 R10, R8 ; /* 0x00000008000a7310 */
/* 0x001e300000301000 */
/*01f0*/ F2F.F64.F32 R10, R10 ; /* 0x0000000a000a7310 */
/* 0x001e240000201800 */
/*0200*/ DADD R12, R10, R10 ; /* 0x000000000a0c7229 */
/* 0x001e0c000000000a */
/*0210*/ DADD R12, -R6, R12 ; /* 0x00000000060c7229 */
/* 0x001e0c000000010c */
/*0220*/ DFMA R12, R10, c[0x2][0x0], R12 ; /* 0x008000000a0c7a2b */
/* 0x001e0c000000000c */
/*0230*/ F2F.F32.F64 R6, R12 ; /* 0x0000000c00067310 */
/* 0x001e300000301000 */
/*0240*/ F2F.F64.F32 R6, R6 ; /* 0x0000000600067310 */
/* 0x001e240000201800 */
/*0250*/ DADD R14, R6, R6 ; /* 0x00000000060e7229 */
/* 0x001e0c0000000006 */
/*0260*/ DADD R14, -R10, R14 ; /* 0x000000000a0e7229 */
/* 0x001e0c000000010e */
/*0270*/ DFMA R14, R6, c[0x2][0x0], R14 ; /* 0x00800000060e7a2b */
/* 0x001e14000000000e */
/*0280*/ F2F.F32.F64 R15, R14 ; /* 0x0000000e000f7310 */
/* 0x001e300000301000 */
/*0290*/ F2F.F64.F32 R8, R15 ; /* 0x0000000f00087310 */
/* 0x001e240000201800 */
/*02a0*/ DADD R10, R8, R8 ; /* 0x00000000080a7229 */
/* 0x001e0c0000000008 */
/*02b0*/ DADD R10, -R6, R10 ; /* 0x00000000060a7229 */
/* 0x001e0c000000010a */
/*02c0*/ DFMA R10, R8, c[0x2][0x0], R10 ; /* 0x00800000080a7a2b */
/* 0x001e0c000000000a */
/*02d0*/ F2F.F32.F64 R17, R10 ; /* 0x0000000a00117310 */
/* 0x0010620000301000 */
/*02e0*/ @P1 BRA 0x170 ; /* 0xfffffe8000001947 */
/* 0x000fea000383ffff */
/*02f0*/ @!P0 BRA 0x3a0 ; /* 0x000000a000008947 */
/* 0x000fea0003800000 */
/*0300*/ F2F.F64.F32 R8, R17 ; /* 0x0000001100087310 */
/* 0x002e620000201800 */
/*0310*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */
/* 0x000fc80007ffe0ff */
/*0320*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fc60003f05270 */
/*0330*/ F2F.F64.F32 R6, R15 ; /* 0x0000000f00067310 */
/* 0x0010a20000201800 */
/*0340*/ DADD R10, R8, R8 ; /* 0x00000000080a7229 */
/* 0x002ea20000000008 */
/*0350*/ IMAD.MOV.U32 R15, RZ, RZ, R17 ; /* 0x000000ffff0f7224 */
/* 0x001fca00078e0011 */
/*0360*/ DADD R6, R10, -R6 ; /* 0x000000000a067229 */
/* 0x004e0c0000000806 */
/*0370*/ DFMA R6, R8, c[0x2][0x0], R6 ; /* 0x0080000008067a2b */
/* 0x001e0c0000000006 */
/*0380*/ F2F.F32.F64 R17, R6 ; /* 0x0000000600117310 */
/* 0x0010620000301000 */
/*0390*/ @P0 BRA 0x300 ; /* 0xffffff6000000947 */
/* 0x000fea000383ffff */
/*03a0*/ STS [R0.X4], R17 ; /* 0x0000001100007388 */
/* 0x0023e80000004800 */
/*03b0*/ STS [R0.X4+0x1000], R15 ; /* 0x0010000f00007388 */
/* 0x0003e40000004800 */
/*03c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*03d0*/ LDS R5, [R0.X4] ; /* 0x0000000000057984 */
/* 0x001e280000004800 */
/*03e0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*03f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0400*/ BRA 0x400; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0480*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0490*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*********************************************************
* DESCRIPTION: *
* Serial Concurrent Wave Equation - C Version *
* This program implements the concurrent wave equation*
**********************************************************/
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#define MAXPOINTS 1000000
#define MINPOINTS 20
#define MAXSTEPS 1000000
#define PI 3.14159265
void check_param(void);
void init_line(void);
void update(void);
void printfinal(void);
int nsteps, /*number of time steps*/
tpoints, /*total points along string*/
rcode; /*generic return code*/
float values[MAXPOINTS + 2], /*values at time t*/
oldval[MAXPOINTS + 2], /*values at time (t-dt)*/
newval[MAXPOINTS + 2]; /*values at time (t+dt)*/
float v[MAXPOINTS + 2], /*serial used for comparing answers with parallel*/
o[MAXPOINTS + 2],
n[MAXPOINTS + 2];
/*********************************************************
* Check input values from parameters *
**********************************************************/
void check_param(void){
char tchar[20];
/*check number of points, number of iterations*/
while((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)){
printf("Enter number of points along vibrating string [%d-%d]: ");
scanf("%s", tchar);
tpoints = atoi(tchar);
if((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)){
printf("Invalid. Please enter value between %d and %d\n", MINPOINTS, MAXPOINTS);
}
}
while((nsteps < 1) || (nsteps > MAXSTEPS)){
printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
scanf("%s", tchar);
nsteps = atoi(tchar);
if((nsteps < 1) || (nsteps > MAXSTEPS)){
printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
}
}
printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/*********************************************************
* Initialize points on line *
**********************************************************/
void init_line(void){
int i, j;
float x, fac, k, tmp;
/*Calculate initial values based on sine curve*/
fac = 2.0 * PI;
k = 0.0;
tmp = tpoints - 1;
for(j = 1; j <= tpoints; j++){
x = k/tmp;
values[j] = sin(fac * x);
v[j] = values[j];
k = k + 1.0;
}
/*Initialize old values array*/
for(i = 1; i <= tpoints; i++){
oldval[i] = values[i];
o[i] = v[i];
}
}
/*********************************************************
* Calculate new values using wave equation *
**********************************************************/
__global__ void do_math_kernel(float *val_d, float *old_d, float *new_d, int num_of_steps, int num_of_points){
float dtime, c, dx, tau, sqtau;
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
__shared__ float val_ds[1024];
__shared__ float old_ds[1024];
__shared__ float new_ds[1024];
int tx = threadIdx.x;
int index = blockIdx.x * blockDim.x + threadIdx.x;
val_ds[tx] = val_d[index];
old_ds[tx] = old_d[index];
for(int i = 1; i <= num_of_steps; i++){
/*This part needs to access elements from global memory of GPU*/
/*if(index == 0 || index == num_of_points - 1) new_d[index] = 0.0;
else new_d[index] = (2.0 * val_d[index]) - old_d[index] + (sqtau * (-2.0) * val_d[index]);
old_d[index] = val_d[index];
val_d[index] = new_d[index];*/
/*This part accesses elements from shared memory of GPU -> faster*/
/*if(index == 0 || index == num_of_points - 1) new_ds[tx] = 0.0;
else new_ds[tx] = (2.0 * val_ds[tx]) - old_ds[tx] + (sqtau * (-2.0) * val_ds[tx]);
old_ds[tx] = val_ds[tx];
val_ds[tx] = new_ds[tx];*/
/*This part only takes values[2~tpoints-1] total tpoins-2 points from CPU to GPU in order to reduce branch overhead*/
new_ds[tx] = (2.0 * val_ds[tx]) - old_ds[tx] + (sqtau * (-2.0) * val_ds[tx]);
old_ds[tx] = val_ds[tx];
val_ds[tx] = new_ds[tx];
}
__syncthreads();
val_d[index] = val_ds[tx];
}
void do_math(int i){
float dtime, c, dx, tau, sqtau;
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
newval[i] = (2.0 * values[i]) - oldval[i] + (sqtau * (-2.0) * values[i]);
//n[i] = (2.0 * v[i]) - o[i] + (sqtau * (-2.0) * v[i]);
}
/***********************************************************
* Update all values along line a specified number of times*
***********************************************************/
void updateOnDevice(){
//int size = tpoints * sizeof(float);
int size = (tpoints - 2) * sizeof(float);
float *val_d, *old_d, *new_d; //memory on device
/*1.Allocate device memory and move initiail values[] and oldval[] to GPU*/
cudaMalloc(&val_d, size);
//cudaMemcpy(val_d, values+1, size, cudaMemcpyHostToDevice);
cudaMemcpy(val_d, values+2, size, cudaMemcpyHostToDevice);
cudaMalloc(&old_d, size);
//cudaMemcpy(old_d, oldval+1, size, cudaMemcpyHostToDevice);
cudaMemcpy(old_d, oldval+2, size, cudaMemcpyHostToDevice);
cudaMalloc(&new_d, size);
/*2.Invoke kernel function, each thread calculates a value[] element*/
int threads_per_block, blocks_per_grid = tpoints/1024 + 1;
if(tpoints > 1024) threads_per_block = 1024;
else threads_per_block = tpoints;
dim3 dimBlock(threads_per_block);
dim3 dimGrid(blocks_per_grid);
do_math_kernel<<<dimGrid,dimBlock>>>(val_d, old_d, new_d, nsteps, tpoints);
/*3.Read final results from GPU to CPU*/
//cudaMemcpy(values+1, val_d, size, cudaMemcpyDeviceToHost);
cudaMemcpy(values+2, val_d, size, cudaMemcpyDeviceToHost);
cudaFree(val_d); cudaFree(old_d); cudaFree(new_d);
}
void update(){
int i, j;
/*Update values for each time step*/
for(i = 1; i <= nsteps; i++){
/*Update points along line for this time step*/
for(j = 1; j <= tpoints; j++){
/*global endpoints*/
if((j == 1) || (j == tpoints)){
newval[j] = 0.0;
//n[j] = 0.0;
}
else do_math(j);
}
/*Update old values with new values*/
for(j = 1; j <= tpoints; j++){
oldval[j] = values[j];
values[j] = newval[j];
/*o[j] = v[j];
v[j] = n[j];*/
}
}
}
/**********************************************************
* Print final results *
**********************************************************/
void printfinal(){
int i;
for(i = 1; i <= tpoints; i++){
printf("%6.4f ", values[i]);
if(i%10 == 0) printf("\n");
}
}
/**********************************************************
* Check serial and parallel answers *
**********************************************************/
void check_answer(){
int wrong = 0, num = 0;
for(int i = 1; i <= tpoints; i++){
if(values[i]!=v[i]){
wrong = 1;
num++;
}
}
if(wrong == 0) printf("right\n");
else printf("%d are wrong\n", num);
/*In command line ./ cuda_wave.out > [filename] to pipe output to the file
then use diff file1 file2 to see if there is any difference*/
}
/*********************************************************
* Main Program *
**********************************************************/
int main(int argc, char *argv[]){
sscanf(argv[1], "%d", &tpoints);
sscanf(argv[2], "%d", &nsteps);
check_param();
printf("Initializing points on the line...\n");
init_line();
//printfinal();
printf("Updating all points for all time steps...\n");
//update();
updateOnDevice();
printf("Printing final results...\n");
printfinal();
printf("\nDone.\n\n");
return 0;
} | .file "tmpxft_0015fe2f_00000000-6_wave.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2067:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2067:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Enter number of points along vibrating string [%d-%d]: "
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "%s"
.section .rodata.str1.8
.align 8
.LC2:
.string "Invalid. Please enter value between %d and %d\n"
.align 8
.LC3:
.string "Enter number of time steps [1-%d]: "
.align 8
.LC4:
.string "Invalid. Please enter value between 1 and %d\n"
.align 8
.LC5:
.string "Using points = %d, steps = %d\n"
.text
.globl _Z11check_paramv
.type _Z11check_paramv, @function
_Z11check_paramv:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %r12
movq %rsp, %rbx
leaq .LC1(%rip), %rbp
.L5:
movl tpoints(%rip), %eax
subl $20, %eax
cmpl $999980, %eax
jbe .L14
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rsi
movq %rbp, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
movl $10, %edx
movl $0, %esi
movq %rbx, %rdi
call __isoc23_strtol@PLT
movl %eax, tpoints(%rip)
subl $20, %eax
cmpl $999980, %eax
jbe .L5
movl $1000000, %ecx
movl $20, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L5
.L14:
leaq .LC3(%rip), %r12
movq %rsp, %rbx
leaq .LC1(%rip), %rbp
.L8:
movl nsteps(%rip), %ecx
leal -1(%rcx), %eax
cmpl $999999, %eax
jbe .L15
movl $1000000, %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rsi
movq %rbp, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
movl $10, %edx
movl $0, %esi
movq %rbx, %rdi
call __isoc23_strtol@PLT
movl %eax, nsteps(%rip)
subl $1, %eax
cmpl $999999, %eax
jbe .L8
movl $1000000, %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L8
.L15:
movl tpoints(%rip), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z11check_paramv, .-_Z11check_paramv
.globl _Z9init_linev
.type _Z9init_linev, @function
_Z9init_linev:
.LFB2058:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $16, %rsp
.cfi_def_cfa_offset 64
movl tpoints(%rip), %eax
leal -1(%rax), %edx
pxor %xmm3, %xmm3
cvtsi2ssl %edx, %xmm3
movss %xmm3, 12(%rsp)
testl %eax, %eax
jle .L17
leaq 4+values(%rip), %rbx
leaq 4+v(%rip), %r12
leal -1(%rax), %r13d
leaq 4(%rbx), %rax
leaq (%rax,%r13,4), %r14
movl $0x00000000, %ebp
.L19:
movd %ebp, %xmm0
divss 12(%rsp), %xmm0
mulss .LC7(%rip), %xmm0
call sinf@PLT
movss %xmm0, (%rbx)
movss %xmm0, (%r12)
movd %ebp, %xmm1
addss .LC8(%rip), %xmm1
movd %xmm1, %ebp
addq $4, %rbx
addq $4, %r12
cmpq %r14, %rbx
jne .L19
addq $2, %r13
movl $1, %eax
leaq oldval(%rip), %rdi
leaq values(%rip), %rsi
leaq o(%rip), %rcx
leaq v(%rip), %rdx
.L20:
movss (%rsi,%rax,4), %xmm0
movss %xmm0, (%rdi,%rax,4)
movss (%rdx,%rax,4), %xmm0
movss %xmm0, (%rcx,%rax,4)
addq $1, %rax
cmpq %r13, %rax
jne .L20
.L17:
addq $16, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z9init_linev, .-_Z9init_linev
.globl _Z7do_mathi
.type _Z7do_mathi, @function
_Z7do_mathi:
.LFB2059:
.cfi_startproc
endbr64
movslq %edi, %rdi
leaq values(%rip), %rax
pxor %xmm1, %xmm1
cvtss2sd (%rax,%rdi,4), %xmm1
movapd %xmm1, %xmm0
addsd %xmm1, %xmm0
leaq oldval(%rip), %rax
pxor %xmm2, %xmm2
cvtss2sd (%rax,%rdi,4), %xmm2
subsd %xmm2, %xmm0
mulsd .LC9(%rip), %xmm1
addsd %xmm1, %xmm0
cvtsd2ss %xmm0, %xmm0
leaq newval(%rip), %rax
movss %xmm0, (%rax,%rdi,4)
ret
.cfi_endproc
.LFE2059:
.size _Z7do_mathi, .-_Z7do_mathi
.globl _Z6updatev
.type _Z6updatev, @function
_Z6updatev:
.LFB2061:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movl $1, %r14d
leaq newval(%rip), %r12
leaq oldval(%rip), %r13
leaq values(%rip), %rbp
cmpl $0, nsteps(%rip)
jg .L26
.L25:
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L36:
.cfi_restore_state
movl $0x00000000, (%r12,%rbx,4)
.L30:
movl tpoints(%rip), %eax
addq $1, %rbx
cmpl %ebx, %eax
jl .L42
.L31:
cmpl %ebx, %eax
je .L36
cmpl $1, %ebx
je .L36
movl %ebx, %edi
call _Z7do_mathi
jmp .L30
.L42:
testl %eax, %eax
jle .L32
leal 1(%rax), %edx
movl $1, %eax
.L33:
movss 0(%rbp,%rax,4), %xmm0
movss %xmm0, 0(%r13,%rax,4)
movss (%r12,%rax,4), %xmm0
movss %xmm0, 0(%rbp,%rax,4)
addq $1, %rax
cmpq %rdx, %rax
jne .L33
.L32:
addl $1, %r14d
cmpl %r14d, nsteps(%rip)
jl .L25
.L26:
movl tpoints(%rip), %eax
movl $1, %ebx
testl %eax, %eax
jg .L31
jmp .L32
.cfi_endproc
.LFE2061:
.size _Z6updatev, .-_Z6updatev
.section .rodata.str1.1
.LC10:
.string "%6.4f "
.LC11:
.string "\n"
.text
.globl _Z10printfinalv
.type _Z10printfinalv, @function
_Z10printfinalv:
.LFB2062:
.cfi_startproc
endbr64
cmpl $0, tpoints(%rip)
jle .L49
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movl $1, %ebx
leaq values(%rip), %r12
leaq .LC10(%rip), %rbp
leaq .LC11(%rip), %r13
jmp .L46
.L45:
addq $1, %rbx
cmpl %ebx, tpoints(%rip)
jl .L52
.L46:
pxor %xmm0, %xmm0
cvtss2sd (%r12,%rbx,4), %xmm0
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movslq %ebx, %rax
imulq $1717986919, %rax, %rax
sarq $34, %rax
movl %ebx, %edx
sarl $31, %edx
subl %edx, %eax
leal (%rax,%rax,4), %eax
addl %eax, %eax
cmpl %ebx, %eax
jne .L45
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L45
.L52:
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L49:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
ret
.cfi_endproc
.LFE2062:
.size _Z10printfinalv, .-_Z10printfinalv
.section .rodata.str1.1
.LC12:
.string "right\n"
.LC13:
.string "%d are wrong\n"
.text
.globl _Z12check_answerv
.type _Z12check_answerv, @function
_Z12check_answerv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl tpoints(%rip), %eax
testl %eax, %eax
jle .L54
leal 1(%rax), %edi
movl $1, %eax
movl $0, %esi
movl $0, %r8d
leaq values(%rip), %rcx
leaq v(%rip), %rdx
movl $1, %r9d
jmp .L57
.L60:
addl $1, %esi
movl %r9d, %r8d
.L55:
addq $1, %rax
cmpq %rax, %rdi
je .L63
.L57:
movss (%rcx,%rax,4), %xmm0
ucomiss (%rdx,%rax,4), %xmm0
jp .L60
je .L55
jmp .L60
.L63:
testl %r8d, %r8d
je .L54
movl %esi, %edx
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L53
.L54:
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L53:
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _Z12check_answerv, .-_Z12check_answerv
.globl _Z40__device_stub__Z14do_math_kernelPfS_S_iiPfS_S_ii
.type _Z40__device_stub__Z14do_math_kernelPfS_S_iiPfS_S_ii, @function
_Z40__device_stub__Z14do_math_kernelPfS_S_iiPfS_S_ii:
.LFB2089:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L68
.L64:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L69
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L68:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14do_math_kernelPfS_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L64
.L69:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2089:
.size _Z40__device_stub__Z14do_math_kernelPfS_S_iiPfS_S_ii, .-_Z40__device_stub__Z14do_math_kernelPfS_S_iiPfS_S_ii
.globl _Z14do_math_kernelPfS_S_ii
.type _Z14do_math_kernelPfS_S_ii, @function
_Z14do_math_kernelPfS_S_ii:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z14do_math_kernelPfS_S_iiPfS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _Z14do_math_kernelPfS_S_ii, .-_Z14do_math_kernelPfS_S_ii
.globl _Z14updateOnDevicev
.type _Z14updateOnDevicev, @function
_Z14updateOnDevicev:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $64, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl tpoints(%rip), %eax
leal -8(,%rax,4), %ebx
movslq %ebx, %rbx
leaq 8(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
leaq 8+values(%rip), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
leaq 8+oldval(%rip), %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl tpoints(%rip), %edx
movl $1024, %eax
cmpl %eax, %edx
cmovle %edx, %eax
movl %eax, 32(%rsp)
movl $1, 36(%rsp)
leal 1023(%rdx), %eax
testl %edx, %edx
cmovns %edx, %eax
sarl $10, %eax
addl $1, %eax
movl %eax, 44(%rsp)
movl $1, 48(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movl $1, %ecx
movq 44(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L76
.L73:
movl $2, %ecx
movq %rbx, %rdx
movq 8(%rsp), %rsi
leaq 8+values(%rip), %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L77
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L76:
.cfi_restore_state
movl tpoints(%rip), %r8d
movl nsteps(%rip), %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z40__device_stub__Z14do_math_kernelPfS_S_iiPfS_S_ii
jmp .L73
.L77:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size _Z14updateOnDevicev, .-_Z14updateOnDevicev
.section .rodata.str1.1
.LC14:
.string "%d"
.section .rodata.str1.8
.align 8
.LC15:
.string "Initializing points on the line...\n"
.align 8
.LC16:
.string "Updating all points for all time steps...\n"
.section .rodata.str1.1
.LC17:
.string "Printing final results...\n"
.LC18:
.string "\nDone.\n\n"
.text
.globl main
.type main, @function
main:
.LFB2064:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rsi, %rbx
movq 8(%rsi), %rdi
leaq tpoints(%rip), %rdx
leaq .LC14(%rip), %rbp
movq %rbp, %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
movq 16(%rbx), %rdi
leaq nsteps(%rip), %rdx
movq %rbp, %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
call _Z11check_paramv
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call _Z9init_linev
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call _Z14updateOnDevicev
leaq .LC17(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call _Z10printfinalv
leaq .LC18(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size main, .-main
.section .rodata.str1.1
.LC19:
.string "_Z14do_math_kernelPfS_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2092:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _Z14do_math_kernelPfS_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2092:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl n
.bss
.align 32
.type n, @object
.size n, 4000008
n:
.zero 4000008
.globl o
.align 32
.type o, @object
.size o, 4000008
o:
.zero 4000008
.globl v
.align 32
.type v, @object
.size v, 4000008
v:
.zero 4000008
.globl newval
.align 32
.type newval, @object
.size newval, 4000008
newval:
.zero 4000008
.globl oldval
.align 32
.type oldval, @object
.size oldval, 4000008
oldval:
.zero 4000008
.globl values
.align 32
.type values, @object
.size values, 4000008
values:
.zero 4000008
.globl rcode
.align 4
.type rcode, @object
.size rcode, 4
rcode:
.zero 4
.globl tpoints
.align 4
.type tpoints, @object
.size tpoints, 4
tpoints:
.zero 4
.globl nsteps
.align 4
.type nsteps, @object
.size nsteps, 4
nsteps:
.zero 4
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC7:
.long 1086918619
.align 4
.LC8:
.long 1065353216
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC9:
.long -2147483648
.long -1077474755
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*********************************************************
* DESCRIPTION: *
* Serial Concurrent Wave Equation - C Version *
* This program implements the concurrent wave equation*
**********************************************************/
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#define MAXPOINTS 1000000
#define MINPOINTS 20
#define MAXSTEPS 1000000
#define PI 3.14159265
void check_param(void);
void init_line(void);
void update(void);
void printfinal(void);
int nsteps, /*number of time steps*/
tpoints, /*total points along string*/
rcode; /*generic return code*/
float values[MAXPOINTS + 2], /*values at time t*/
oldval[MAXPOINTS + 2], /*values at time (t-dt)*/
newval[MAXPOINTS + 2]; /*values at time (t+dt)*/
float v[MAXPOINTS + 2], /*serial used for comparing answers with parallel*/
o[MAXPOINTS + 2],
n[MAXPOINTS + 2];
/*********************************************************
 * Check input values from parameters                    *
 **********************************************************/
/* Re-prompt on stdin until the globals tpoints and nsteps fall in
 * their legal ranges ([MINPOINTS,MAXPOINTS] and [1,MAXSTEPS]).
 * Fixes: the first prompt had two %d conversions but passed no
 * arguments (undefined behavior); scanf is now width-limited so a
 * long token cannot overflow tchar[20]. */
void check_param(void){
    char tchar[20];
    /* check number of points */
    while((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)){
        printf("Enter number of points along vibrating string [%d-%d]: ", MINPOINTS, MAXPOINTS);
        scanf("%19s", tchar);
        tpoints = atoi(tchar);
        if((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)){
            printf("Invalid. Please enter value between %d and %d\n", MINPOINTS, MAXPOINTS);
        }
    }
    /* check number of time steps */
    while((nsteps < 1) || (nsteps > MAXSTEPS)){
        printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
        scanf("%19s", tchar);
        nsteps = atoi(tchar);
        if((nsteps < 1) || (nsteps > MAXSTEPS)){
            printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
        }
    }
    printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/*********************************************************
 * Initialize points on line                             *
 **********************************************************/
/* Seed values[]/v[] (1-based, tpoints entries) with one period of a
 * sine wave, then copy the same curve into oldval[]/o[] so the first
 * step starts from a zero initial velocity. */
void init_line(void){
    float fac = 2.0 * PI;
    float span = tpoints - 1;
    float k = 0.0;
    int idx;
    for(idx = 1; idx <= tpoints; idx++){
        float x = k / span;
        values[idx] = sin(fac * x);
        v[idx] = values[idx];
        k = k + 1.0;
    }
    /* old values start identical to the current values */
    for(idx = 1; idx <= tpoints; idx++){
        oldval[idx] = values[idx];
        o[idx] = v[idx];
    }
}
/*********************************************************
 * Calculate new values using wave equation              *
 **********************************************************/
/* One thread advances one interior string point through all
 * num_of_steps time steps. val_d/old_d hold num_of_points-2 interior
 * values (the endpoints never leave the host), so excess threads from
 * the rounded-up grid must exit before touching memory.
 * Fixes: the original indexed val_d/old_d without any bounds check
 * even though the launch rounds the grid up past the buffer size
 * (out-of-bounds global access); it also staged strictly per-thread
 * data through __shared__ arrays and issued a __syncthreads() although
 * no data crosses threads — plain registers are equivalent and need no
 * barrier. new_d stays in the signature for compatibility but is
 * unused, as before. */
__global__ void do_math_kernel(float *val_d, float *old_d, float *new_d, int num_of_steps, int num_of_points){
    float dtime, c, dx, tau, sqtau;
    dtime = 0.3;
    c = 1.0;
    dx = 1.0;
    tau = (c * dtime / dx);
    sqtau = tau * tau;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    /* guard the rounded-up grid tail: buffers hold num_of_points-2 floats */
    if(index >= num_of_points - 2) return;
    float val = val_d[index];
    float old = old_d[index];
    for(int i = 1; i <= num_of_steps; i++){
        /* same expression (and float/double promotions) as the original */
        float next = (2.0 * val) - old + (sqtau * (-2.0) * val);
        old = val;
        val = next;
    }
    val_d[index] = val;
}
/* One explicit-scheme update of interior point i on the host:
 * newval = 2*values - oldval - 2*sqtau*values, with tau = c*dt/dx
 * (c = 1, dt = 0.3, dx = 1). */
void do_math(int i){
    const float dtime = 0.3;
    const float c = 1.0;
    const float dx = 1.0;
    const float tau = c * dtime / dx;
    const float sqtau = tau * tau;
    newval[i] = (2.0 * values[i]) - oldval[i] + (sqtau * (-2.0) * values[i]);
}
/***********************************************************
 * Update all values along line a specified number of times*
 ***********************************************************/
/* GPU version of update(): copies the tpoints-2 interior points
 * (values[2..tpoints-1] and oldval[2..tpoints-1]) to the device, runs
 * do_math_kernel once for all time steps, then copies the result back
 * into values[2..tpoints-1]. The endpoints are never transferred, so
 * they keep whatever the host set them to.
 * NOTE(review): no CUDA API return codes are checked, and the grid
 * (tpoints/1024 + 1 blocks) launches more threads than the buffers
 * hold elements — confirm the kernel bounds-checks its index. */
void updateOnDevice(){
//int size = tpoints * sizeof(float);
int size = (tpoints - 2) * sizeof(float);
float *val_d, *old_d, *new_d; //memory on device
/*1.Allocate device memory and move initial values[] and oldval[] to GPU*/
cudaMalloc(&val_d, size);
//cudaMemcpy(val_d, values+1, size, cudaMemcpyHostToDevice);
cudaMemcpy(val_d, values+2, size, cudaMemcpyHostToDevice);
cudaMalloc(&old_d, size);
//cudaMemcpy(old_d, oldval+1, size, cudaMemcpyHostToDevice);
cudaMemcpy(old_d, oldval+2, size, cudaMemcpyHostToDevice);
cudaMalloc(&new_d, size);
/*2.Invoke kernel function, each thread calculates a value[] element*/
int threads_per_block, blocks_per_grid = tpoints/1024 + 1;
if(tpoints > 1024) threads_per_block = 1024;
else threads_per_block = tpoints;
dim3 dimBlock(threads_per_block);
dim3 dimGrid(blocks_per_grid);
do_math_kernel<<<dimGrid,dimBlock>>>(val_d, old_d, new_d, nsteps, tpoints);
/*3.Read final results from GPU to CPU*/
//cudaMemcpy(values+1, val_d, size, cudaMemcpyDeviceToHost);
cudaMemcpy(values+2, val_d, size, cudaMemcpyDeviceToHost);
cudaFree(val_d); cudaFree(old_d); cudaFree(new_d);
}
/* CPU reference implementation: advance every point on the string
 * through all nsteps time steps. Endpoints (j==1, j==tpoints) are
 * pinned to zero; interior points use do_math(). */
void update(){
    int step, j;
    for(step = 1; step <= nsteps; step++){
        /* compute newval[] for this time step */
        for(j = 1; j <= tpoints; j++){
            if(j == 1 || j == tpoints)
                newval[j] = 0.0;        /* fixed string endpoints */
            else
                do_math(j);
        }
        /* rotate time levels: (t-dt) <- t, t <- (t+dt) */
        for(j = 1; j <= tpoints; j++){
            oldval[j] = values[j];
            values[j] = newval[j];
        }
    }
}
/**********************************************************
 * Print final results                                    *
 **********************************************************/
/* Dump values[1..tpoints] to stdout, ten per line, 4 decimals each. */
void printfinal(){
    int idx;
    for(idx = 1; idx <= tpoints; idx++){
        printf("%6.4f ", values[idx]);
        if(idx % 10 == 0)
            printf("\n");
    }
}
/**********************************************************
 * Check serial and parallel answers                      *
 **********************************************************/
/* Count exact mismatches between the parallel result (values[]) and
 * the serial reference (v[]). Pipe stdout to a file and diff two runs
 * to inspect differences.
 * NOTE(review): float == comparison kept from the original —
 * presumably a bit-exact diff is intended; confirm before loosening
 * to a tolerance. */
void check_answer(){
    int num = 0;
    int i;
    for(i = 1; i <= tpoints; i++){
        if(values[i] != v[i])
            num++;
    }
    if(num == 0) printf("right\n");
    else printf("%d are wrong\n", num);
}
/*********************************************************
 * Main Program                                          *
 **********************************************************/
/* Usage: prog <tpoints> <nsteps>.
 * Parses the two sizes, validates them (check_param re-prompts when
 * out of range), runs the GPU update and prints the final string.
 * Fix: argv[1]/argv[2] were dereferenced without verifying argc,
 * crashing when arguments were missing. */
int main(int argc, char *argv[]){
    if(argc < 3){
        fprintf(stderr, "usage: %s <tpoints> <nsteps>\n", argv[0]);
        return 1;
    }
    sscanf(argv[1], "%d", &tpoints);
    sscanf(argv[2], "%d", &nsteps);
    check_param();
    printf("Initializing points on the line...\n");
    init_line();
    printf("Updating all points for all time steps...\n");
    updateOnDevice();
    printf("Printing final results...\n");
    printfinal();
    printf("\nDone.\n\n");
    return 0;
}
* DESCRIPTION: *
* Serial Concurrent Wave Equation - C Version *
* This program implements the concurrent wave equation*
**********************************************************/
#include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#define MAXPOINTS 1000000
#define MINPOINTS 20
#define MAXSTEPS 1000000
#define PI 3.14159265
void check_param(void);
void init_line(void);
void update(void);
void printfinal(void);
int nsteps, /*number of time steps*/
tpoints, /*total points along string*/
rcode; /*generic return code*/
float values[MAXPOINTS + 2], /*values at time t*/
oldval[MAXPOINTS + 2], /*values at time (t-dt)*/
newval[MAXPOINTS + 2]; /*values at time (t+dt)*/
float v[MAXPOINTS + 2], /*serial used for comparing answers with parallel*/
o[MAXPOINTS + 2],
n[MAXPOINTS + 2];
/*********************************************************
 * Check input values from parameters                    *
 **********************************************************/
/* Re-prompt on stdin until the globals tpoints and nsteps fall in
 * their legal ranges ([MINPOINTS,MAXPOINTS] and [1,MAXSTEPS]).
 * Fixes: the first prompt had two %d conversions but passed no
 * arguments (undefined behavior); scanf is now width-limited so a
 * long token cannot overflow tchar[20]. */
void check_param(void){
    char tchar[20];
    /* check number of points */
    while((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)){
        printf("Enter number of points along vibrating string [%d-%d]: ", MINPOINTS, MAXPOINTS);
        scanf("%19s", tchar);
        tpoints = atoi(tchar);
        if((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)){
            printf("Invalid. Please enter value between %d and %d\n", MINPOINTS, MAXPOINTS);
        }
    }
    /* check number of time steps */
    while((nsteps < 1) || (nsteps > MAXSTEPS)){
        printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
        scanf("%19s", tchar);
        nsteps = atoi(tchar);
        if((nsteps < 1) || (nsteps > MAXSTEPS)){
            printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
        }
    }
    printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/*********************************************************
 * Initialize points on line                             *
 **********************************************************/
/* Seed values[]/v[] (1-based, tpoints entries) with one period of a
 * sine wave, then copy the same curve into oldval[]/o[] so the first
 * step starts from a zero initial velocity. */
void init_line(void){
    float fac = 2.0 * PI;
    float span = tpoints - 1;
    float k = 0.0;
    int idx;
    for(idx = 1; idx <= tpoints; idx++){
        float x = k / span;
        values[idx] = sin(fac * x);
        v[idx] = values[idx];
        k = k + 1.0;
    }
    /* old values start identical to the current values */
    for(idx = 1; idx <= tpoints; idx++){
        oldval[idx] = values[idx];
        o[idx] = v[idx];
    }
}
/*********************************************************
 * Calculate new values using wave equation              *
 **********************************************************/
/* One thread advances one interior string point through all
 * num_of_steps time steps. val_d/old_d hold num_of_points-2 interior
 * values (the endpoints never leave the host), so excess threads from
 * the rounded-up grid must exit before touching memory.
 * Fixes: the original indexed val_d/old_d without any bounds check
 * even though the launch rounds the grid up past the buffer size
 * (out-of-bounds global access); it also staged strictly per-thread
 * data through __shared__ arrays and issued a __syncthreads() although
 * no data crosses threads — plain registers are equivalent and need no
 * barrier. new_d stays in the signature for compatibility but is
 * unused, as before. */
__global__ void do_math_kernel(float *val_d, float *old_d, float *new_d, int num_of_steps, int num_of_points){
    float dtime, c, dx, tau, sqtau;
    dtime = 0.3;
    c = 1.0;
    dx = 1.0;
    tau = (c * dtime / dx);
    sqtau = tau * tau;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    /* guard the rounded-up grid tail: buffers hold num_of_points-2 floats */
    if(index >= num_of_points - 2) return;
    float val = val_d[index];
    float old = old_d[index];
    for(int i = 1; i <= num_of_steps; i++){
        /* same expression (and float/double promotions) as the original */
        float next = (2.0 * val) - old + (sqtau * (-2.0) * val);
        old = val;
        val = next;
    }
    val_d[index] = val;
}
/* One explicit-scheme update of interior point i on the host:
 * newval = 2*values - oldval - 2*sqtau*values, with tau = c*dt/dx
 * (c = 1, dt = 0.3, dx = 1). */
void do_math(int i){
    const float dtime = 0.3;
    const float c = 1.0;
    const float dx = 1.0;
    const float tau = c * dtime / dx;
    const float sqtau = tau * tau;
    newval[i] = (2.0 * values[i]) - oldval[i] + (sqtau * (-2.0) * values[i]);
}
/***********************************************************
 * Update all values along line a specified number of times*
 ***********************************************************/
/* GPU version of update(): copies the tpoints-2 interior points
 * (values[2..tpoints-1] and oldval[2..tpoints-1]) to the device, runs
 * do_math_kernel once for all time steps, then copies the result back
 * into values[2..tpoints-1]. The endpoints are never transferred, so
 * they keep whatever the host set them to.
 * NOTE(review): no HIP API return codes are checked, and the grid
 * (tpoints/1024 + 1 blocks) launches more threads than the buffers
 * hold elements — confirm the kernel bounds-checks its index. */
void updateOnDevice(){
//int size = tpoints * sizeof(float);
int size = (tpoints - 2) * sizeof(float);
float *val_d, *old_d, *new_d; //memory on device
/*1.Allocate device memory and move initial values[] and oldval[] to GPU*/
hipMalloc(&val_d, size);
//cudaMemcpy(val_d, values+1, size, cudaMemcpyHostToDevice);
hipMemcpy(val_d, values+2, size, hipMemcpyHostToDevice);
hipMalloc(&old_d, size);
//cudaMemcpy(old_d, oldval+1, size, cudaMemcpyHostToDevice);
hipMemcpy(old_d, oldval+2, size, hipMemcpyHostToDevice);
hipMalloc(&new_d, size);
/*2.Invoke kernel function, each thread calculates a value[] element*/
int threads_per_block, blocks_per_grid = tpoints/1024 + 1;
if(tpoints > 1024) threads_per_block = 1024;
else threads_per_block = tpoints;
dim3 dimBlock(threads_per_block);
dim3 dimGrid(blocks_per_grid);
do_math_kernel<<<dimGrid,dimBlock>>>(val_d, old_d, new_d, nsteps, tpoints);
/*3.Read final results from GPU to CPU*/
//cudaMemcpy(values+1, val_d, size, cudaMemcpyDeviceToHost);
hipMemcpy(values+2, val_d, size, hipMemcpyDeviceToHost);
hipFree(val_d); hipFree(old_d); hipFree(new_d);
}
/* CPU reference implementation: advance every point on the string
 * through all nsteps time steps. Endpoints (j==1, j==tpoints) are
 * pinned to zero; interior points use do_math(). */
void update(){
    int step, j;
    for(step = 1; step <= nsteps; step++){
        /* compute newval[] for this time step */
        for(j = 1; j <= tpoints; j++){
            if(j == 1 || j == tpoints)
                newval[j] = 0.0;        /* fixed string endpoints */
            else
                do_math(j);
        }
        /* rotate time levels: (t-dt) <- t, t <- (t+dt) */
        for(j = 1; j <= tpoints; j++){
            oldval[j] = values[j];
            values[j] = newval[j];
        }
    }
}
/**********************************************************
 * Print final results                                    *
 **********************************************************/
/* Dump values[1..tpoints] to stdout, ten per line, 4 decimals each. */
void printfinal(){
    int idx;
    for(idx = 1; idx <= tpoints; idx++){
        printf("%6.4f ", values[idx]);
        if(idx % 10 == 0)
            printf("\n");
    }
}
/**********************************************************
 * Check serial and parallel answers                      *
 **********************************************************/
/* Count exact mismatches between the parallel result (values[]) and
 * the serial reference (v[]). Pipe stdout to a file and diff two runs
 * to inspect differences.
 * NOTE(review): float == comparison kept from the original —
 * presumably a bit-exact diff is intended; confirm before loosening
 * to a tolerance. */
void check_answer(){
    int num = 0;
    int i;
    for(i = 1; i <= tpoints; i++){
        if(values[i] != v[i])
            num++;
    }
    if(num == 0) printf("right\n");
    else printf("%d are wrong\n", num);
}
/*********************************************************
 * Main Program                                          *
 **********************************************************/
/* Usage: prog <tpoints> <nsteps>.
 * Parses the two sizes, validates them (check_param re-prompts when
 * out of range), runs the GPU update and prints the final string.
 * Fix: argv[1]/argv[2] were dereferenced without verifying argc,
 * crashing when arguments were missing. */
int main(int argc, char *argv[]){
    if(argc < 3){
        fprintf(stderr, "usage: %s <tpoints> <nsteps>\n", argv[0]);
        return 1;
    }
    sscanf(argv[1], "%d", &tpoints);
    sscanf(argv[2], "%d", &nsteps);
    check_param();
    printf("Initializing points on the line...\n");
    init_line();
    printf("Updating all points for all time steps...\n");
    updateOnDevice();
    printf("Printing final results...\n");
    printfinal();
    printf("\nDone.\n\n");
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*********************************************************
* DESCRIPTION: *
* Serial Concurrent Wave Equation - C Version *
* This program implements the concurrent wave equation*
**********************************************************/
#include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#define MAXPOINTS 1000000
#define MINPOINTS 20
#define MAXSTEPS 1000000
#define PI 3.14159265
void check_param(void);
void init_line(void);
void update(void);
void printfinal(void);
int nsteps, /*number of time steps*/
tpoints, /*total points along string*/
rcode; /*generic return code*/
float values[MAXPOINTS + 2], /*values at time t*/
oldval[MAXPOINTS + 2], /*values at time (t-dt)*/
newval[MAXPOINTS + 2]; /*values at time (t+dt)*/
float v[MAXPOINTS + 2], /*serial used for comparing answers with parallel*/
o[MAXPOINTS + 2],
n[MAXPOINTS + 2];
/*********************************************************
 * Check input values from parameters                    *
 **********************************************************/
/* Re-prompt on stdin until the globals tpoints and nsteps fall in
 * their legal ranges ([MINPOINTS,MAXPOINTS] and [1,MAXSTEPS]).
 * Fixes: the first prompt had two %d conversions but passed no
 * arguments (undefined behavior); scanf is now width-limited so a
 * long token cannot overflow tchar[20]. */
void check_param(void){
    char tchar[20];
    /* check number of points */
    while((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)){
        printf("Enter number of points along vibrating string [%d-%d]: ", MINPOINTS, MAXPOINTS);
        scanf("%19s", tchar);
        tpoints = atoi(tchar);
        if((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)){
            printf("Invalid. Please enter value between %d and %d\n", MINPOINTS, MAXPOINTS);
        }
    }
    /* check number of time steps */
    while((nsteps < 1) || (nsteps > MAXSTEPS)){
        printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
        scanf("%19s", tchar);
        nsteps = atoi(tchar);
        if((nsteps < 1) || (nsteps > MAXSTEPS)){
            printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
        }
    }
    printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/*********************************************************
 * Initialize points on line                             *
 **********************************************************/
/* Seed values[]/v[] (1-based, tpoints entries) with one period of a
 * sine wave, then copy the same curve into oldval[]/o[] so the first
 * step starts from a zero initial velocity. */
void init_line(void){
    float fac = 2.0 * PI;
    float span = tpoints - 1;
    float k = 0.0;
    int idx;
    for(idx = 1; idx <= tpoints; idx++){
        float x = k / span;
        values[idx] = sin(fac * x);
        v[idx] = values[idx];
        k = k + 1.0;
    }
    /* old values start identical to the current values */
    for(idx = 1; idx <= tpoints; idx++){
        oldval[idx] = values[idx];
        o[idx] = v[idx];
    }
}
/*********************************************************
 * Calculate new values using wave equation              *
 **********************************************************/
/* One thread advances one interior string point through all
 * num_of_steps time steps. val_d/old_d hold num_of_points-2 interior
 * values (the endpoints never leave the host), so excess threads from
 * the rounded-up grid must exit before touching memory.
 * Fixes: the original indexed val_d/old_d without any bounds check
 * even though the launch rounds the grid up past the buffer size
 * (out-of-bounds global access); it also staged strictly per-thread
 * data through __shared__ arrays and issued a __syncthreads() although
 * no data crosses threads — plain registers are equivalent and need no
 * barrier. new_d stays in the signature for compatibility but is
 * unused, as before. */
__global__ void do_math_kernel(float *val_d, float *old_d, float *new_d, int num_of_steps, int num_of_points){
    float dtime, c, dx, tau, sqtau;
    dtime = 0.3;
    c = 1.0;
    dx = 1.0;
    tau = (c * dtime / dx);
    sqtau = tau * tau;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    /* guard the rounded-up grid tail: buffers hold num_of_points-2 floats */
    if(index >= num_of_points - 2) return;
    float val = val_d[index];
    float old = old_d[index];
    for(int i = 1; i <= num_of_steps; i++){
        /* same expression (and float/double promotions) as the original */
        float next = (2.0 * val) - old + (sqtau * (-2.0) * val);
        old = val;
        val = next;
    }
    val_d[index] = val;
}
/* Serial wave-equation update for point i:
   newval[i] = 2*values[i] - oldval[i] - 2*tau^2*values[i], tau = c*dt/dx. */
void do_math(int i){
    const float dt = 0.3;
    const float wave_speed = 1.0;
    const float dx_step = 1.0;
    const float courant = (wave_speed * dt / dx_step);
    const float courant_sq = courant * courant;
    newval[i] = (2.0 * values[i]) - oldval[i] + (courant_sq * (-2.0) * values[i]);
}
/***********************************************************
* Update all values along line a specified number of times*
***********************************************************/
/***********************************************************
 * Update all values along line a specified number of times *
 * GPU path: only the interior points values[2..tpoints-1]  *
 * travel to the device; the endpoints keep their initial   *
 * (approximately zero) values.                             *
 ***********************************************************/
void updateOnDevice(){
    int elems = tpoints - 2;               /* interior points on the device */
    int size = elems * (int)sizeof(float);
    float *val_d, *old_d, *new_d;          /* device buffers */
    /* 1. Allocate device memory and upload initial values[] and oldval[]. */
    hipMalloc(&val_d, size);
    hipMemcpy(val_d, values + 2, size, hipMemcpyHostToDevice);
    hipMalloc(&old_d, size);
    hipMemcpy(old_d, oldval + 2, size, hipMemcpyHostToDevice);
    hipMalloc(&new_d, size);               /* kept for kernel signature compat */
    /* 2. Launch one thread per interior element.
       FIX: size the grid with a ceiling division over the element count.
       The original used tpoints/1024 + 1 blocks of up to tpoints threads,
       which over-provisioned whole extra blocks (e.g. tpoints == 1024
       launched 2048 threads for 1022 elements). */
    int threads_per_block = (elems < 1024) ? elems : 1024;
    if (threads_per_block < 1) threads_per_block = 1;
    int blocks_per_grid = (elems + threads_per_block - 1) / threads_per_block;
    if (blocks_per_grid < 1) blocks_per_grid = 1;
    dim3 dimBlock(threads_per_block);
    dim3 dimGrid(blocks_per_grid);
    do_math_kernel<<<dimGrid,dimBlock>>>(val_d, old_d, new_d, nsteps, tpoints);
    /* 3. Download the final interior values back into values[]. */
    hipMemcpy(values + 2, val_d, size, hipMemcpyDeviceToHost);
    hipFree(val_d); hipFree(old_d); hipFree(new_d);
}
/* Serial reference: advance the whole string nsteps time steps, pinning
   both endpoints to zero and stepping every interior point via do_math(). */
void update(){
    for(int step = 1; step <= nsteps; step++){
        /* Compute the next displacement for every point of this step. */
        for(int p = 1; p <= tpoints; p++){
            if(p == 1 || p == tpoints)
                newval[p] = 0.0;           /* fixed boundary condition */
            else
                do_math(p);
        }
        /* Rotate the time levels: old <- current, current <- new. */
        for(int p = 1; p <= tpoints; p++){
            oldval[p] = values[p];
            values[p] = newval[p];
        }
    }
}
/**********************************************************
* Print final results *
**********************************************************/
/* Print values[1..tpoints] with 4 decimal places, ten per output line. */
void printfinal(){
    for(int idx = 1; idx <= tpoints; idx++){
        printf("%6.4f ", values[idx]);
        if(idx % 10 == 0)
            printf("\n");
    }
}
/**********************************************************
* Check serial and parallel answers *
**********************************************************/
/* Compare the parallel result values[] against the serial reference v[]
   element-by-element (exact float equality is intentional here). */
void check_answer(){
    int mismatches = 0;
    for(int idx = 1; idx <= tpoints; idx++){
        if(values[idx] != v[idx])
            mismatches++;
    }
    if(mismatches == 0)
        printf("right\n");
    else
        printf("%d are wrong\n", mismatches);
    /* Alternatively pipe stdout to a file (./cuda_wave.out > file) and use
       diff against a serial run to inspect differences. */
}
/*********************************************************
* Main Program *
**********************************************************/
/* Entry point: read tpoints and nsteps from the command line, run the GPU
   wave simulation, and print the final string values. */
int main(int argc, char *argv[]){
    /* FIX: the original dereferenced argv[1]/argv[2] unconditionally and
       crashed when fewer than two arguments were given; check_param()
       prompts interactively for anything missing or out of range. */
    if (argc > 1) sscanf(argv[1], "%d", &tpoints);
    if (argc > 2) sscanf(argv[2], "%d", &nsteps);
    check_param();
    printf("Initializing points on the line...\n");
    init_line();
    printf("Updating all points for all time steps...\n");
    updateOnDevice();
    printf("Printing final results...\n");
    printfinal();
    printf("\nDone.\n\n");
    return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14do_math_kernelPfS_S_ii
.globl _Z14do_math_kernelPfS_S_ii
.p2align 8
.type _Z14do_math_kernelPfS_S_ii,@function
_Z14do_math_kernelPfS_S_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b128 s[4:7], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_load_b32 s2, s[0:1], 0x18
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_add_co_u32 v1, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v4, vcc_lo
v_add_co_u32 v3, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s2, 1
global_load_b32 v5, v[1:2], off
global_load_b32 v4, v[3:4], off
v_lshlrev_b32_e32 v3, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_or_b32_e32 v0, 0x1000, v3
s_waitcnt vmcnt(0)
ds_store_2addr_stride64_b32 v3, v4, v5 offset1:16
s_cbranch_scc1 .LBB0_4
ds_load_b32 v5, v0
ds_load_b32 v4, v3
s_mov_b32 s1, 0xbfc70a3d
s_brev_b32 s0, 1
.LBB0_2:
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_dual_mov_b32 v7, v4 :: v_dual_mov_b32 v4, v5
s_add_i32 s2, s2, -1
s_cmp_eq_u32 s2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cvt_f64_f32_e32 v[7:8], v7
v_cvt_f64_f32_e32 v[5:6], v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[7:8], v[5:6], 2.0, -v[7:8]
v_fma_f64 v[5:6], v[5:6], s[0:1], v[7:8]
s_delay_alu instid0(VALU_DEP_1)
v_cvt_f32_f64_e32 v5, v[5:6]
s_cbranch_scc0 .LBB0_2
ds_store_b32 v0, v5
ds_store_b32 v3, v4
.LBB0_4:
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
global_store_b32 v[1:2], v0, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14do_math_kernelPfS_S_ii
.amdhsa_group_segment_fixed_size 8192
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14do_math_kernelPfS_S_ii, .Lfunc_end0-_Z14do_math_kernelPfS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 8192
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14do_math_kernelPfS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14do_math_kernelPfS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*********************************************************
* DESCRIPTION: *
* Serial Concurrent Wave Equation - C Version *
* This program implements the concurrent wave equation*
**********************************************************/
#include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#define MAXPOINTS 1000000
#define MINPOINTS 20
#define MAXSTEPS 1000000
#define PI 3.14159265
void check_param(void);
void init_line(void);
void update(void);
void printfinal(void);
/* Simulation state shared by every routine below.
   Indices 1..tpoints are used; slot 0 and the +2 padding slots stay unused. */
int nsteps, /*number of time steps*/
tpoints, /*total points along string*/
rcode; /*generic return code (not set anywhere in this file)*/
float values[MAXPOINTS + 2], /*values at time t*/
oldval[MAXPOINTS + 2], /*values at time (t-dt)*/
newval[MAXPOINTS + 2]; /*values at time (t+dt)*/
float v[MAXPOINTS + 2], /*serial used for comparing answers with parallel*/
o[MAXPOINTS + 2], /*serial counterpart of oldval[]*/
n[MAXPOINTS + 2]; /*serial counterpart of newval[]*/
/*********************************************************
* Check input values from parameters *
**********************************************************/
/* Validate the global tpoints/nsteps, prompting interactively until each
   lies in its legal range ([MINPOINTS, MAXPOINTS] and [1, MAXSTEPS]). */
void check_param(void){
    char tchar[20];
    /*check number of points, number of iterations*/
    while((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)){
        /* FIX: the format string has two %d conversions but the original
           call passed no arguments -- undefined behavior. */
        printf("Enter number of points along vibrating string [%d-%d]: ", MINPOINTS, MAXPOINTS);
        /* FIX: bound the read to 19 chars + NUL so tchar cannot overflow. */
        scanf("%19s", tchar);
        tpoints = atoi(tchar);
        if((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)){
            printf("Invalid. Please enter value between %d and %d\n", MINPOINTS, MAXPOINTS);
        }
    }
    while((nsteps < 1) || (nsteps > MAXSTEPS)){
        printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
        scanf("%19s", tchar);
        nsteps = atoi(tchar);
        if((nsteps < 1) || (nsteps > MAXSTEPS)){
            printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
        }
    }
    printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/*********************************************************
* Initialize points on line *
**********************************************************/
/* Seed the string with one full sine period and make the serial copy:
   values[j] = v[j] = sin(2*PI * (j-1)/(tpoints-1)), oldval/o start equal
   to the current values (zero initial velocity). */
void init_line(void){
    const float phase_scale = 2.0 * PI;     /* one full period over the string */
    const float denom = tpoints - 1;
    float step = 0.0;
    for(int idx = 1; idx <= tpoints; idx++){
        float frac = step / denom;
        values[idx] = sin(phase_scale * frac);
        v[idx] = values[idx];
        step = step + 1.0;
    }
    /* Old-value arrays mirror the initial displacement. */
    for(int idx = 1; idx <= tpoints; idx++){
        oldval[idx] = values[idx];
        o[idx] = v[idx];
    }
}
/*********************************************************
* Calculate new values using wave equation *
**********************************************************/
/* GPU kernel: advance one interior point of the string through all
   num_of_steps time steps. One thread per element; val_d/old_d hold only
   the num_of_points-2 interior values (endpoints stay on the host).
   new_d is accepted for interface compatibility but unused. */
__global__ void do_math_kernel(float *val_d, float *old_d, float *new_d, int num_of_steps, int num_of_points){
    /* tau = c*dt/dx; float literals avoid silent double promotion on device. */
    const float dtime = 0.3f;
    const float c = 1.0f;
    const float dx = 1.0f;
    const float tau = c * dtime / dx;
    const float sqtau = tau * tau;
    __shared__ float val_ds[1024];
    __shared__ float old_ds[1024];
    int tx = threadIdx.x;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    /* FIX: only num_of_points-2 elements were allocated/copied, but the
       launch config can spawn more threads; the original kernel read and
       wrote val_d/old_d out of bounds for the tail threads. */
    bool active = (index < num_of_points - 2);
    if (active) {
        val_ds[tx] = val_d[index];
        old_ds[tx] = old_d[index];
        /* Each thread only touches its own shared slots, so no intra-loop
           barrier is needed; shared memory just keeps the hot values close. */
        for(int i = 1; i <= num_of_steps; i++){
            float next = (2.0f * val_ds[tx]) - old_ds[tx] + (sqtau * (-2.0f) * val_ds[tx]);
            old_ds[tx] = val_ds[tx];
            val_ds[tx] = next;
        }
    }
    /* Barrier stays outside the divergent guard: __syncthreads() must be
       reached by every thread of the block. */
    __syncthreads();
    if (active) val_d[index] = val_ds[tx];
}
/* Serial wave-equation update for point i:
   newval[i] = 2*values[i] - oldval[i] - 2*tau^2*values[i], tau = c*dt/dx. */
void do_math(int i){
    const float dt = 0.3;
    const float wave_speed = 1.0;
    const float dx_step = 1.0;
    const float courant = (wave_speed * dt / dx_step);
    const float courant_sq = courant * courant;
    newval[i] = (2.0 * values[i]) - oldval[i] + (courant_sq * (-2.0) * values[i]);
}
/***********************************************************
* Update all values along line a specified number of times*
***********************************************************/
/***********************************************************
 * Update all values along line a specified number of times *
 * GPU path: only the interior points values[2..tpoints-1]  *
 * travel to the device; the endpoints keep their initial   *
 * (approximately zero) values.                             *
 ***********************************************************/
void updateOnDevice(){
    int elems = tpoints - 2;               /* interior points on the device */
    int size = elems * (int)sizeof(float);
    float *val_d, *old_d, *new_d;          /* device buffers */
    /* 1. Allocate device memory and upload initial values[] and oldval[]. */
    hipMalloc(&val_d, size);
    hipMemcpy(val_d, values + 2, size, hipMemcpyHostToDevice);
    hipMalloc(&old_d, size);
    hipMemcpy(old_d, oldval + 2, size, hipMemcpyHostToDevice);
    hipMalloc(&new_d, size);               /* kept for kernel signature compat */
    /* 2. Launch one thread per interior element.
       FIX: size the grid with a ceiling division over the element count.
       The original used tpoints/1024 + 1 blocks of up to tpoints threads,
       which over-provisioned whole extra blocks (e.g. tpoints == 1024
       launched 2048 threads for 1022 elements). */
    int threads_per_block = (elems < 1024) ? elems : 1024;
    if (threads_per_block < 1) threads_per_block = 1;
    int blocks_per_grid = (elems + threads_per_block - 1) / threads_per_block;
    if (blocks_per_grid < 1) blocks_per_grid = 1;
    dim3 dimBlock(threads_per_block);
    dim3 dimGrid(blocks_per_grid);
    do_math_kernel<<<dimGrid,dimBlock>>>(val_d, old_d, new_d, nsteps, tpoints);
    /* 3. Download the final interior values back into values[]. */
    hipMemcpy(values + 2, val_d, size, hipMemcpyDeviceToHost);
    hipFree(val_d); hipFree(old_d); hipFree(new_d);
}
/* Serial reference: advance the whole string nsteps time steps, pinning
   both endpoints to zero and stepping every interior point via do_math(). */
void update(){
    for(int step = 1; step <= nsteps; step++){
        /* Compute the next displacement for every point of this step. */
        for(int p = 1; p <= tpoints; p++){
            if(p == 1 || p == tpoints)
                newval[p] = 0.0;           /* fixed boundary condition */
            else
                do_math(p);
        }
        /* Rotate the time levels: old <- current, current <- new. */
        for(int p = 1; p <= tpoints; p++){
            oldval[p] = values[p];
            values[p] = newval[p];
        }
    }
}
/**********************************************************
* Print final results *
**********************************************************/
/* Print values[1..tpoints] with 4 decimal places, ten per output line. */
void printfinal(){
    for(int idx = 1; idx <= tpoints; idx++){
        printf("%6.4f ", values[idx]);
        if(idx % 10 == 0)
            printf("\n");
    }
}
/**********************************************************
* Check serial and parallel answers *
**********************************************************/
/* Compare the parallel result values[] against the serial reference v[]
   element-by-element (exact float equality is intentional here). */
void check_answer(){
    int mismatches = 0;
    for(int idx = 1; idx <= tpoints; idx++){
        if(values[idx] != v[idx])
            mismatches++;
    }
    if(mismatches == 0)
        printf("right\n");
    else
        printf("%d are wrong\n", mismatches);
    /* Alternatively pipe stdout to a file (./cuda_wave.out > file) and use
       diff against a serial run to inspect differences. */
}
/*********************************************************
* Main Program *
**********************************************************/
/* Entry point: read tpoints and nsteps from the command line, run the GPU
   wave simulation, and print the final string values. */
int main(int argc, char *argv[]){
    /* FIX: the original dereferenced argv[1]/argv[2] unconditionally and
       crashed when fewer than two arguments were given; check_param()
       prompts interactively for anything missing or out of range. */
    if (argc > 1) sscanf(argv[1], "%d", &tpoints);
    if (argc > 2) sscanf(argv[2], "%d", &nsteps);
    check_param();
    printf("Initializing points on the line...\n");
    init_line();
    printf("Updating all points for all time steps...\n");
    updateOnDevice();
    printf("Printing final results...\n");
    printfinal();
    printf("\nDone.\n\n");
    return 0;
}
.file "wave.hip"
.globl _Z11check_paramv # -- Begin function _Z11check_paramv
.p2align 4, 0x90
.type _Z11check_paramv,@function
_Z11check_paramv: # @_Z11check_paramv
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $24, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -24
.cfi_offset %rbp, -16
movl $-1000001, %ebp # imm = 0xFFF0BDBF
movl tpoints(%rip), %eax
addl %ebp, %eax
cmpl $-999982, %eax # imm = 0xFFF0BDD2
ja .LBB0_5
# %bb.1:
movq %rsp, %rbx
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_4: # in Loop: Header=BB0_2 Depth=1
movl tpoints(%rip), %eax
addl %ebp, %eax
cmpl $-999981, %eax # imm = 0xFFF0BDD3
jae .LBB0_5
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq __isoc23_scanf
movq %rbx, %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, tpoints(%rip)
addl $-1000001, %eax # imm = 0xFFF0BDBF
cmpl $-999982, %eax # imm = 0xFFF0BDD2
ja .LBB0_4
# %bb.3: # in Loop: Header=BB0_2 Depth=1
movl $.L.str.2, %edi
movl $20, %esi
movl $1000000, %edx # imm = 0xF4240
xorl %eax, %eax
callq printf
jmp .LBB0_4
.LBB0_5: # %.preheader
movl nsteps(%rip), %edx
leal -1000001(%rdx), %eax
cmpl $-1000001, %eax # imm = 0xFFF0BDBF
ja .LBB0_10
# %bb.6:
movq %rsp, %rbx
jmp .LBB0_7
.p2align 4, 0x90
.LBB0_9: # in Loop: Header=BB0_7 Depth=1
movl nsteps(%rip), %edx
leal -1000001(%rdx), %eax
cmpl $-1000000, %eax # imm = 0xFFF0BDC0
jae .LBB0_10
.LBB0_7: # %.lr.ph4
# =>This Inner Loop Header: Depth=1
movl $.L.str.3, %edi
movl $1000000, %esi # imm = 0xF4240
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq __isoc23_scanf
movq %rbx, %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, nsteps(%rip)
addl $-1000001, %eax # imm = 0xFFF0BDBF
cmpl $-1000001, %eax # imm = 0xFFF0BDBF
ja .LBB0_9
# %bb.8: # in Loop: Header=BB0_7 Depth=1
movl $.L.str.4, %edi
movl $1000000, %esi # imm = 0xF4240
xorl %eax, %eax
callq printf
jmp .LBB0_9
.LBB0_10: # %._crit_edge
movl tpoints(%rip), %esi
movl $.L.str.5, %edi
# kill: def $edx killed $edx killed $rdx
xorl %eax, %eax
callq printf
addq $24, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z11check_paramv, .Lfunc_end0-_Z11check_paramv
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z9init_linev
.LCPI1_0:
.long 0x40c90fdb # float 6.28318548
.LCPI1_1:
.long 0x3f800000 # float 1
.text
.globl _Z9init_linev
.p2align 4, 0x90
.type _Z9init_linev,@function
_Z9init_linev: # @_Z9init_linev
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $16, %rsp
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -16
movl tpoints(%rip), %eax
testl %eax, %eax
jle .LBB1_3
# %bb.1: # %.lr.ph.preheader
decl %eax
cvtsi2ss %eax, %xmm0
movss %xmm0, 12(%rsp) # 4-byte Spill
xorps %xmm0, %xmm0
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss %xmm0, 8(%rsp) # 4-byte Spill
movss 8(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
divss 12(%rsp), %xmm0 # 4-byte Folded Reload
mulss .LCPI1_0(%rip), %xmm0
callq sinf
movss %xmm0, values+4(,%rbx,4)
movss %xmm0, v+4(,%rbx,4)
movss 8(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
addss .LCPI1_1(%rip), %xmm0
movslq tpoints(%rip), %rax
incq %rbx
cmpq %rax, %rbx
jl .LBB1_2
.LBB1_3: # %.preheader
testl %eax, %eax
jle .LBB1_4
# %bb.5: # %.lr.ph23.preheader
movl %eax, %ebx
shlq $2, %rbx
movl $oldval+4, %edi
movl $values+4, %esi
movq %rbx, %rdx
callq memcpy@PLT
movl $o+4, %edi
movl $v+4, %esi
movq %rbx, %rdx
addq $16, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp memcpy@PLT # TAILCALL
.LBB1_4: # %._crit_edge
.cfi_def_cfa_offset 32
addq $16, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z9init_linev, .Lfunc_end1-_Z9init_linev
.cfi_endproc
# -- End function
.globl _Z29__device_stub__do_math_kernelPfS_S_ii # -- Begin function _Z29__device_stub__do_math_kernelPfS_S_ii
.p2align 4, 0x90
.type _Z29__device_stub__do_math_kernelPfS_S_ii,@function
_Z29__device_stub__do_math_kernelPfS_S_ii: # @_Z29__device_stub__do_math_kernelPfS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14do_math_kernelPfS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z29__device_stub__do_math_kernelPfS_S_ii, .Lfunc_end2-_Z29__device_stub__do_math_kernelPfS_S_ii
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z7do_mathi
.LCPI3_0:
.quad 0xbfc70a3d80000000 # double -0.18000000715255737
.text
.globl _Z7do_mathi
.p2align 4, 0x90
.type _Z7do_mathi,@function
_Z7do_mathi: # @_Z7do_mathi
.cfi_startproc
# %bb.0:
movslq %edi, %rax
movss values(,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movaps %xmm0, %xmm1
addsd %xmm0, %xmm1
movss oldval(,%rax,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
cvtss2sd %xmm2, %xmm2
subsd %xmm2, %xmm1
mulsd .LCPI3_0(%rip), %xmm0
addsd %xmm1, %xmm0
cvtsd2ss %xmm0, %xmm0
movss %xmm0, newval(,%rax,4)
retq
.Lfunc_end3:
.size _Z7do_mathi, .Lfunc_end3-_Z7do_mathi
.cfi_endproc
# -- End function
.globl _Z14updateOnDevicev # -- Begin function _Z14updateOnDevicev
.p2align 4, 0x90
.type _Z14updateOnDevicev,@function
_Z14updateOnDevicev: # @_Z14updateOnDevicev
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $160, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -16
movl tpoints(%rip), %eax
leal -8(,%rax,4), %eax
movslq %eax, %rbx
leaq 8(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movq 8(%rsp), %rdi
movl $values+8, %esi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 16(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movl $oldval+8, %esi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 32(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movl tpoints(%rip), %eax
leal 1023(%rax), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $10, %edi
incl %edi
cmpl $1024, %eax # imm = 0x400
movl $1024, %edx # imm = 0x400
cmovll %eax, %edx
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdx
orq %rax, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_2
# %bb.1:
movq 8(%rsp), %rax
movq 16(%rsp), %rcx
movq 32(%rsp), %rdx
movl nsteps(%rip), %esi
movl tpoints(%rip), %edi
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %esi, 28(%rsp)
movl %edi, 24(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 28(%rsp), %rax
movq %rax, 136(%rsp)
leaq 24(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z14do_math_kernelPfS_S_ii, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_2:
movq 8(%rsp), %rsi
movl $values+8, %edi
movq %rbx, %rdx
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
addq $160, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z14updateOnDevicev, .Lfunc_end4-_Z14updateOnDevicev
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z6updatev
.LCPI5_0:
.quad 0xbfc70a3d80000000 # double -0.18000000715255737
.text
.globl _Z6updatev
.p2align 4, 0x90
.type _Z6updatev,@function
_Z6updatev: # @_Z6updatev
.cfi_startproc
# %bb.0:
movl nsteps(%rip), %eax
testl %eax, %eax
jle .LBB5_12
# %bb.1: # %.preheader18.lr.ph
movl tpoints(%rip), %ecx
leal 1(%rcx), %edx
leaq -4(,%rcx,4), %rsi
leaq -4(,%rdx,4), %rdi
shlq $2, %rdx
movl $1, %r8d
movsd .LCPI5_0(%rip), %xmm0 # xmm0 = mem[0],zero
jmp .LBB5_2
.p2align 4, 0x90
.LBB5_11: # %._crit_edge
# in Loop: Header=BB5_2 Depth=1
leal 1(%r8), %r9d
cmpl %eax, %r8d
movl %r9d, %r8d
je .LBB5_12
.LBB5_2: # %.preheader18
# =>This Loop Header: Depth=1
# Child Loop BB5_4 Depth 2
# Child Loop BB5_10 Depth 2
testl %ecx, %ecx
jle .LBB5_8
# %bb.3: # %.lr.ph.preheader
# in Loop: Header=BB5_2 Depth=1
xorl %r9d, %r9d
jmp .LBB5_4
.p2align 4, 0x90
.LBB5_7: # in Loop: Header=BB5_4 Depth=2
movss %xmm1, newval+4(%r9)
addq $4, %r9
cmpq %r9, %rdi
je .LBB5_8
.LBB5_4: # %.lr.ph
# Parent Loop BB5_2 Depth=1
# => This Inner Loop Header: Depth=2
xorps %xmm1, %xmm1
testq %r9, %r9
je .LBB5_7
# %bb.5: # %.lr.ph
# in Loop: Header=BB5_4 Depth=2
cmpq %r9, %rsi
je .LBB5_7
# %bb.6: # in Loop: Header=BB5_4 Depth=2
movss values+4(%r9), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm1, %xmm1
movaps %xmm1, %xmm2
addsd %xmm1, %xmm2
movss oldval+4(%r9), %xmm3 # xmm3 = mem[0],zero,zero,zero
cvtss2sd %xmm3, %xmm3
subsd %xmm3, %xmm2
mulsd %xmm0, %xmm1
addsd %xmm2, %xmm1
cvtsd2ss %xmm1, %xmm1
jmp .LBB5_7
.p2align 4, 0x90
.LBB5_8: # %.preheader
# in Loop: Header=BB5_2 Depth=1
testl %ecx, %ecx
jle .LBB5_11
# %bb.9: # %.lr.ph23.preheader
# in Loop: Header=BB5_2 Depth=1
movl $4, %r9d
.p2align 4, 0x90
.LBB5_10: # %.lr.ph23
# Parent Loop BB5_2 Depth=1
# => This Inner Loop Header: Depth=2
movss values(%r9), %xmm1 # xmm1 = mem[0],zero,zero,zero
movss %xmm1, oldval(%r9)
movss newval(%r9), %xmm1 # xmm1 = mem[0],zero,zero,zero
movss %xmm1, values(%r9)
addq $4, %r9
cmpq %r9, %rdx
jne .LBB5_10
jmp .LBB5_11
.LBB5_12: # %._crit_edge26
retq
.Lfunc_end5:
.size _Z6updatev, .Lfunc_end5-_Z6updatev
.cfi_endproc
# -- End function
.globl _Z10printfinalv # -- Begin function _Z10printfinalv
.p2align 4, 0x90
.type _Z10printfinalv,@function
_Z10printfinalv: # @_Z10printfinalv
.cfi_startproc
# %bb.0:
cmpl $0, tpoints(%rip)
jle .LBB6_6
# %bb.1: # %.lr.ph.preheader
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $1, %ebx
xorl %r14d, %r14d
movl $3435973837, %r15d # imm = 0xCCCCCCCD
jmp .LBB6_2
.p2align 4, 0x90
.LBB6_4: # in Loop: Header=BB6_2 Depth=1
movslq tpoints(%rip), %rax
incq %r14
incl %ebx
cmpq %rax, %r14
jge .LBB6_5
.LBB6_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl %ebx, %eax
imulq %r15, %rax
shrq $35, %rax
leal (%rax,%rax,4), %eax
leal -1(,%rax,2), %ebp
movss values+4(,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.6, %edi
movb $1, %al
callq printf
cmpl %r14d, %ebp
jne .LBB6_4
# %bb.3: # in Loop: Header=BB6_2 Depth=1
movl $10, %edi
callq putchar@PLT
jmp .LBB6_4
.LBB6_5:
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.LBB6_6: # %._crit_edge
retq
.Lfunc_end6:
.size _Z10printfinalv, .Lfunc_end6-_Z10printfinalv
.cfi_endproc
# -- End function
.globl _Z12check_answerv # -- Begin function _Z12check_answerv
.p2align 4, 0x90
.type _Z12check_answerv,@function
_Z12check_answerv: # @_Z12check_answerv
.cfi_startproc
# %bb.0:
movl tpoints(%rip), %eax
testl %eax, %eax
jle .LBB7_1
# %bb.5: # %.lr.ph.preheader
leaq 4(,%rax,4), %rax
xorl %esi, %esi
movl $4, %ecx
movl $1, %edx
xorl %edi, %edi
.p2align 4, 0x90
.LBB7_6: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss values(%rcx), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss v(%rcx), %xmm1 # xmm1 = mem[0],zero,zero,zero
ucomiss %xmm1, %xmm0
cmpneqss %xmm1, %xmm0
cmovnel %edx, %edi
cmovpl %edx, %edi
movd %xmm0, %r8d
subl %r8d, %esi
addq $4, %rcx
cmpq %rcx, %rax
jne .LBB7_6
# %bb.2: # %._crit_edge.loopexit
testl %edi, %edi
sete %al
testb %al, %al
je .LBB7_7
.LBB7_4:
movl $.Lstr, %edi
jmp puts@PLT # TAILCALL
.LBB7_1:
xorl %esi, %esi
movb $1, %al
testb %al, %al
jne .LBB7_4
.LBB7_7:
movl $.L.str.9, %edi
xorl %eax, %eax
jmp printf # TAILCALL
.Lfunc_end7:
.size _Z12check_answerv, .Lfunc_end7-_Z12check_answerv
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI8_0:
.long 0x40c90fdb # float 6.28318548
.LCPI8_1:
.long 0x3f800000 # float 1
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movq 8(%rsi), %rdi
movl $.L.str.10, %esi
movl $tpoints, %edx
xorl %eax, %eax
callq __isoc23_sscanf
movq 16(%rbx), %rdi
movl $.L.str.10, %esi
movl $nsteps, %edx
xorl %eax, %eax
callq __isoc23_sscanf
callq _Z11check_paramv
movl $.Lstr.1, %edi
callq puts@PLT
movl tpoints(%rip), %eax
testl %eax, %eax
jle .LBB8_3
# %bb.1: # %.lr.ph.i.preheader
decl %eax
cvtsi2ss %eax, %xmm0
movss %xmm0, 4(%rsp) # 4-byte Spill
xorps %xmm0, %xmm0
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB8_2: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
movss %xmm0, (%rsp) # 4-byte Spill
movss (%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
divss 4(%rsp), %xmm0 # 4-byte Folded Reload
mulss .LCPI8_0(%rip), %xmm0
callq sinf
movss %xmm0, values+4(,%rbx,4)
movss %xmm0, v+4(,%rbx,4)
movss (%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
addss .LCPI8_1(%rip), %xmm0
movslq tpoints(%rip), %rax
incq %rbx
cmpq %rax, %rbx
jl .LBB8_2
.LBB8_3: # %.preheader.i
testl %eax, %eax
jle .LBB8_5
# %bb.4: # %.lr.ph23.preheader.i
movl %eax, %ebx
shlq $2, %rbx
movl $oldval+4, %edi
movl $values+4, %esi
movq %rbx, %rdx
callq memcpy@PLT
movl $o+4, %edi
movl $v+4, %esi
movq %rbx, %rdx
callq memcpy@PLT
.LBB8_5: # %_Z9init_linev.exit
movl $.Lstr.2, %edi
callq puts@PLT
callq _Z14updateOnDevicev
movl $.Lstr.3, %edi
callq puts@PLT
cmpl $0, tpoints(%rip)
jle .LBB8_10
# %bb.6: # %.lr.ph.i5.preheader
movl $1, %ebx
xorl %r14d, %r14d
movl $3435973837, %r15d # imm = 0xCCCCCCCD
jmp .LBB8_7
.p2align 4, 0x90
.LBB8_9: # in Loop: Header=BB8_7 Depth=1
movslq tpoints(%rip), %rax
incq %r14
incl %ebx
cmpq %rax, %r14
jge .LBB8_10
.LBB8_7: # %.lr.ph.i5
# =>This Inner Loop Header: Depth=1
movl %ebx, %eax
imulq %r15, %rax
shrq $35, %rax
leal (%rax,%rax,4), %eax
leal -1(,%rax,2), %ebp
movss values+4(,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.6, %edi
movb $1, %al
callq printf
cmpl %r14d, %ebp
jne .LBB8_9
# %bb.8: # in Loop: Header=BB8_7 Depth=1
movl $10, %edi
callq putchar@PLT
jmp .LBB8_9
.LBB8_10: # %_Z10printfinalv.exit
movl $.Lstr.4, %edi
callq puts@PLT
xorl %eax, %eax
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end8:
.size main, .Lfunc_end8-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB9_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB9_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14do_math_kernelPfS_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end9:
.size __hip_module_ctor, .Lfunc_end9-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB10_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB10_2:
retq
.Lfunc_end10:
.size __hip_module_dtor, .Lfunc_end10-__hip_module_dtor
.cfi_endproc
# -- End function
.type nsteps,@object # @nsteps
.bss
.globl nsteps
.p2align 2, 0x0
nsteps:
.long 0 # 0x0
.size nsteps, 4
.type tpoints,@object # @tpoints
.globl tpoints
.p2align 2, 0x0
tpoints:
.long 0 # 0x0
.size tpoints, 4
.type rcode,@object # @rcode
.globl rcode
.p2align 2, 0x0
rcode:
.long 0 # 0x0
.size rcode, 4
.type values,@object # @values
.globl values
.p2align 4, 0x0
values:
.zero 4000008
.size values, 4000008
.type oldval,@object # @oldval
.globl oldval
.p2align 4, 0x0
oldval:
.zero 4000008
.size oldval, 4000008
.type newval,@object # @newval
.globl newval
.p2align 4, 0x0
newval:
.zero 4000008
.size newval, 4000008
.type v,@object # @v
.globl v
.p2align 4, 0x0
v:
.zero 4000008
.size v, 4000008
.type o,@object # @o
.globl o
.p2align 4, 0x0
o:
.zero 4000008
.size o, 4000008
.type n,@object # @n
.globl n
.p2align 4, 0x0
n:
.zero 4000008
.size n, 4000008
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Enter number of points along vibrating string [%d-%d]: "
.size .L.str, 56
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%s"
.size .L.str.1, 3
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Invalid. Please enter value between %d and %d\n"
.size .L.str.2, 47
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Enter number of time steps [1-%d]: "
.size .L.str.3, 36
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Invalid. Please enter value between 1 and %d\n"
.size .L.str.4, 46
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Using points = %d, steps = %d\n"
.size .L.str.5, 31
.type _Z14do_math_kernelPfS_S_ii,@object # @_Z14do_math_kernelPfS_S_ii
.section .rodata,"a",@progbits
.globl _Z14do_math_kernelPfS_S_ii
.p2align 3, 0x0
_Z14do_math_kernelPfS_S_ii:
.quad _Z29__device_stub__do_math_kernelPfS_S_ii
.size _Z14do_math_kernelPfS_S_ii, 8
.type .L.str.6,@object # @.str.6
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.6:
.asciz "%6.4f "
.size .L.str.6, 7
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "%d are wrong\n"
.size .L.str.9, 14
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "%d"
.size .L.str.10, 3
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z14do_math_kernelPfS_S_ii"
.size .L__unnamed_1, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "right"
.size .Lstr, 6
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Initializing points on the line..."
.size .Lstr.1, 35
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Updating all points for all time steps..."
.size .Lstr.2, 42
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Printing final results..."
.size .Lstr.3, 26
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "\nDone.\n"
.size .Lstr.4, 8
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__do_math_kernelPfS_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym nsteps
.addrsig_sym tpoints
.addrsig_sym values
.addrsig_sym oldval
.addrsig_sym _Z14do_math_kernelPfS_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z14do_math_kernelPfS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R7, RZ, RZ, 0x4 ; /* 0x00000004ff077424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R3, c[0x0][0x0], R0 ; /* 0x0000000003027a24 */
/* 0x001fc800078e0200 */
/*0060*/ IMAD.WIDE R4, R2, R7, c[0x0][0x168] ; /* 0x00005a0002047625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R2, R2, R7, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fe400078e0207 */
/*0080*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x000ee2000c1e1900 */
/*00a0*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff067624 */
/* 0x000fca00078e00ff */
/*00b0*/ ISETP.GE.AND P0, PT, R6, 0x1, PT ; /* 0x000000010600780c */
/* 0x000fe20003f06270 */
/*00c0*/ STS [R0.X4+0x1000], R5 ; /* 0x0010000500007388 */
/* 0x0041e80000004800 */
/*00d0*/ STS [R0.X4], R7 ; /* 0x0000000700007388 */
/* 0x0081f00000004800 */
/*00e0*/ @!P0 BRA 0x3c0 ; /* 0x000002d000008947 */
/* 0x000fea0003800000 */
/*00f0*/ IADD3 R8, R6.reuse, -0x1, RZ ; /* 0xffffffff06087810 */
/* 0x040fe20007ffe0ff */
/*0100*/ IMAD.MOV.U32 R17, RZ, RZ, R7 ; /* 0x000000ffff117224 */
/* 0x000fe200078e0007 */
/*0110*/ LOP3.LUT R4, R6, 0x3, RZ, 0xc0, !PT ; /* 0x0000000306047812 */
/* 0x000fe200078ec0ff */
/*0120*/ IMAD.MOV.U32 R15, RZ, RZ, R5 ; /* 0x000000ffff0f7224 */
/* 0x000fe200078e0005 */
/*0130*/ ISETP.GE.U32.AND P1, PT, R8, 0x3, PT ; /* 0x000000030800780c */
/* 0x000fc40003f26070 */
/*0140*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fd60003f05270 */
/*0150*/ @!P1 BRA 0x2f0 ; /* 0x0000019000009947 */
/* 0x000fea0003800000 */
/*0160*/ IADD3 R5, -R4, c[0x0][0x178], RZ ; /* 0x00005e0004057a10 */
/* 0x001fc60007ffe1ff */
/*0170*/ F2F.F64.F32 R6, R17 ; /* 0x0000001100067310 */
/* 0x003e220000201800 */
/*0180*/ IADD3 R5, R5, -0x4, RZ ; /* 0xfffffffc05057810 */
/* 0x000fc80007ffe0ff */
/*0190*/ ISETP.NE.AND P1, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fc60003f25270 */
/*01a0*/ F2F.F64.F32 R10, R15 ; /* 0x0000000f000a7310 */
/* 0x000e620000201800 */
/*01b0*/ DADD R8, R6, R6 ; /* 0x0000000006087229 */
/* 0x001e4c0000000006 */
/*01c0*/ DADD R8, R8, -R10 ; /* 0x0000000008087229 */
/* 0x002e0c000000080a */
/*01d0*/ DFMA R8, R6, c[0x2][0x0], R8 ; /* 0x0080000006087a2b */
/* 0x001e0c0000000008 */
/*01e0*/ F2F.F32.F64 R10, R8 ; /* 0x00000008000a7310 */
/* 0x001e300000301000 */
/*01f0*/ F2F.F64.F32 R10, R10 ; /* 0x0000000a000a7310 */
/* 0x001e240000201800 */
/*0200*/ DADD R12, R10, R10 ; /* 0x000000000a0c7229 */
/* 0x001e0c000000000a */
/*0210*/ DADD R12, -R6, R12 ; /* 0x00000000060c7229 */
/* 0x001e0c000000010c */
/*0220*/ DFMA R12, R10, c[0x2][0x0], R12 ; /* 0x008000000a0c7a2b */
/* 0x001e0c000000000c */
/*0230*/ F2F.F32.F64 R6, R12 ; /* 0x0000000c00067310 */
/* 0x001e300000301000 */
/*0240*/ F2F.F64.F32 R6, R6 ; /* 0x0000000600067310 */
/* 0x001e240000201800 */
/*0250*/ DADD R14, R6, R6 ; /* 0x00000000060e7229 */
/* 0x001e0c0000000006 */
/*0260*/ DADD R14, -R10, R14 ; /* 0x000000000a0e7229 */
/* 0x001e0c000000010e */
/*0270*/ DFMA R14, R6, c[0x2][0x0], R14 ; /* 0x00800000060e7a2b */
/* 0x001e14000000000e */
/*0280*/ F2F.F32.F64 R15, R14 ; /* 0x0000000e000f7310 */
/* 0x001e300000301000 */
/*0290*/ F2F.F64.F32 R8, R15 ; /* 0x0000000f00087310 */
/* 0x001e240000201800 */
/*02a0*/ DADD R10, R8, R8 ; /* 0x00000000080a7229 */
/* 0x001e0c0000000008 */
/*02b0*/ DADD R10, -R6, R10 ; /* 0x00000000060a7229 */
/* 0x001e0c000000010a */
/*02c0*/ DFMA R10, R8, c[0x2][0x0], R10 ; /* 0x00800000080a7a2b */
/* 0x001e0c000000000a */
/*02d0*/ F2F.F32.F64 R17, R10 ; /* 0x0000000a00117310 */
/* 0x0010620000301000 */
/*02e0*/ @P1 BRA 0x170 ; /* 0xfffffe8000001947 */
/* 0x000fea000383ffff */
/*02f0*/ @!P0 BRA 0x3a0 ; /* 0x000000a000008947 */
/* 0x000fea0003800000 */
/*0300*/ F2F.F64.F32 R8, R17 ; /* 0x0000001100087310 */
/* 0x002e620000201800 */
/*0310*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */
/* 0x000fc80007ffe0ff */
/*0320*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fc60003f05270 */
/*0330*/ F2F.F64.F32 R6, R15 ; /* 0x0000000f00067310 */
/* 0x0010a20000201800 */
/*0340*/ DADD R10, R8, R8 ; /* 0x00000000080a7229 */
/* 0x002ea20000000008 */
/*0350*/ IMAD.MOV.U32 R15, RZ, RZ, R17 ; /* 0x000000ffff0f7224 */
/* 0x001fca00078e0011 */
/*0360*/ DADD R6, R10, -R6 ; /* 0x000000000a067229 */
/* 0x004e0c0000000806 */
/*0370*/ DFMA R6, R8, c[0x2][0x0], R6 ; /* 0x0080000008067a2b */
/* 0x001e0c0000000006 */
/*0380*/ F2F.F32.F64 R17, R6 ; /* 0x0000000600117310 */
/* 0x0010620000301000 */
/*0390*/ @P0 BRA 0x300 ; /* 0xffffff6000000947 */
/* 0x000fea000383ffff */
/*03a0*/ STS [R0.X4], R17 ; /* 0x0000001100007388 */
/* 0x0023e80000004800 */
/*03b0*/ STS [R0.X4+0x1000], R15 ; /* 0x0010000f00007388 */
/* 0x0003e40000004800 */
/*03c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*03d0*/ LDS R5, [R0.X4] ; /* 0x0000000000057984 */
/* 0x001e280000004800 */
/*03e0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*03f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0400*/ BRA 0x400; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0480*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0490*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14do_math_kernelPfS_S_ii
.globl _Z14do_math_kernelPfS_S_ii
.p2align 8
.type _Z14do_math_kernelPfS_S_ii,@function
_Z14do_math_kernelPfS_S_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b128 s[4:7], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_load_b32 s2, s[0:1], 0x18
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_add_co_u32 v1, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v4, vcc_lo
v_add_co_u32 v3, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s2, 1
global_load_b32 v5, v[1:2], off
global_load_b32 v4, v[3:4], off
v_lshlrev_b32_e32 v3, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_or_b32_e32 v0, 0x1000, v3
s_waitcnt vmcnt(0)
ds_store_2addr_stride64_b32 v3, v4, v5 offset1:16
s_cbranch_scc1 .LBB0_4
ds_load_b32 v5, v0
ds_load_b32 v4, v3
s_mov_b32 s1, 0xbfc70a3d
s_brev_b32 s0, 1
.LBB0_2:
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_dual_mov_b32 v7, v4 :: v_dual_mov_b32 v4, v5
s_add_i32 s2, s2, -1
s_cmp_eq_u32 s2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cvt_f64_f32_e32 v[7:8], v7
v_cvt_f64_f32_e32 v[5:6], v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[7:8], v[5:6], 2.0, -v[7:8]
v_fma_f64 v[5:6], v[5:6], s[0:1], v[7:8]
s_delay_alu instid0(VALU_DEP_1)
v_cvt_f32_f64_e32 v5, v[5:6]
s_cbranch_scc0 .LBB0_2
ds_store_b32 v0, v5
ds_store_b32 v3, v4
.LBB0_4:
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
global_store_b32 v[1:2], v0, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14do_math_kernelPfS_S_ii
.amdhsa_group_segment_fixed_size 8192
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14do_math_kernelPfS_S_ii, .Lfunc_end0-_Z14do_math_kernelPfS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 8192
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14do_math_kernelPfS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14do_math_kernelPfS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0015fe2f_00000000-6_wave.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2067:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2067:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Enter number of points along vibrating string [%d-%d]: "
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "%s"
.section .rodata.str1.8
.align 8
.LC2:
.string "Invalid. Please enter value between %d and %d\n"
.align 8
.LC3:
.string "Enter number of time steps [1-%d]: "
.align 8
.LC4:
.string "Invalid. Please enter value between 1 and %d\n"
.align 8
.LC5:
.string "Using points = %d, steps = %d\n"
.text
.globl _Z11check_paramv
.type _Z11check_paramv, @function
_Z11check_paramv:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %r12
movq %rsp, %rbx
leaq .LC1(%rip), %rbp
.L5:
movl tpoints(%rip), %eax
subl $20, %eax
cmpl $999980, %eax
jbe .L14
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rsi
movq %rbp, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
movl $10, %edx
movl $0, %esi
movq %rbx, %rdi
call __isoc23_strtol@PLT
movl %eax, tpoints(%rip)
subl $20, %eax
cmpl $999980, %eax
jbe .L5
movl $1000000, %ecx
movl $20, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L5
.L14:
leaq .LC3(%rip), %r12
movq %rsp, %rbx
leaq .LC1(%rip), %rbp
.L8:
movl nsteps(%rip), %ecx
leal -1(%rcx), %eax
cmpl $999999, %eax
jbe .L15
movl $1000000, %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rsi
movq %rbp, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
movl $10, %edx
movl $0, %esi
movq %rbx, %rdi
call __isoc23_strtol@PLT
movl %eax, nsteps(%rip)
subl $1, %eax
cmpl $999999, %eax
jbe .L8
movl $1000000, %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L8
.L15:
movl tpoints(%rip), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z11check_paramv, .-_Z11check_paramv
.globl _Z9init_linev
.type _Z9init_linev, @function
_Z9init_linev:
.LFB2058:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $16, %rsp
.cfi_def_cfa_offset 64
movl tpoints(%rip), %eax
leal -1(%rax), %edx
pxor %xmm3, %xmm3
cvtsi2ssl %edx, %xmm3
movss %xmm3, 12(%rsp)
testl %eax, %eax
jle .L17
leaq 4+values(%rip), %rbx
leaq 4+v(%rip), %r12
leal -1(%rax), %r13d
leaq 4(%rbx), %rax
leaq (%rax,%r13,4), %r14
movl $0x00000000, %ebp
.L19:
movd %ebp, %xmm0
divss 12(%rsp), %xmm0
mulss .LC7(%rip), %xmm0
call sinf@PLT
movss %xmm0, (%rbx)
movss %xmm0, (%r12)
movd %ebp, %xmm1
addss .LC8(%rip), %xmm1
movd %xmm1, %ebp
addq $4, %rbx
addq $4, %r12
cmpq %r14, %rbx
jne .L19
addq $2, %r13
movl $1, %eax
leaq oldval(%rip), %rdi
leaq values(%rip), %rsi
leaq o(%rip), %rcx
leaq v(%rip), %rdx
.L20:
movss (%rsi,%rax,4), %xmm0
movss %xmm0, (%rdi,%rax,4)
movss (%rdx,%rax,4), %xmm0
movss %xmm0, (%rcx,%rax,4)
addq $1, %rax
cmpq %r13, %rax
jne .L20
.L17:
addq $16, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z9init_linev, .-_Z9init_linev
.globl _Z7do_mathi
.type _Z7do_mathi, @function
_Z7do_mathi:
.LFB2059:
.cfi_startproc
endbr64
movslq %edi, %rdi
leaq values(%rip), %rax
pxor %xmm1, %xmm1
cvtss2sd (%rax,%rdi,4), %xmm1
movapd %xmm1, %xmm0
addsd %xmm1, %xmm0
leaq oldval(%rip), %rax
pxor %xmm2, %xmm2
cvtss2sd (%rax,%rdi,4), %xmm2
subsd %xmm2, %xmm0
mulsd .LC9(%rip), %xmm1
addsd %xmm1, %xmm0
cvtsd2ss %xmm0, %xmm0
leaq newval(%rip), %rax
movss %xmm0, (%rax,%rdi,4)
ret
.cfi_endproc
.LFE2059:
.size _Z7do_mathi, .-_Z7do_mathi
.globl _Z6updatev
.type _Z6updatev, @function
_Z6updatev:
.LFB2061:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movl $1, %r14d
leaq newval(%rip), %r12
leaq oldval(%rip), %r13
leaq values(%rip), %rbp
cmpl $0, nsteps(%rip)
jg .L26
.L25:
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L36:
.cfi_restore_state
movl $0x00000000, (%r12,%rbx,4)
.L30:
movl tpoints(%rip), %eax
addq $1, %rbx
cmpl %ebx, %eax
jl .L42
.L31:
cmpl %ebx, %eax
je .L36
cmpl $1, %ebx
je .L36
movl %ebx, %edi
call _Z7do_mathi
jmp .L30
.L42:
testl %eax, %eax
jle .L32
leal 1(%rax), %edx
movl $1, %eax
.L33:
movss 0(%rbp,%rax,4), %xmm0
movss %xmm0, 0(%r13,%rax,4)
movss (%r12,%rax,4), %xmm0
movss %xmm0, 0(%rbp,%rax,4)
addq $1, %rax
cmpq %rdx, %rax
jne .L33
.L32:
addl $1, %r14d
cmpl %r14d, nsteps(%rip)
jl .L25
.L26:
movl tpoints(%rip), %eax
movl $1, %ebx
testl %eax, %eax
jg .L31
jmp .L32
.cfi_endproc
.LFE2061:
.size _Z6updatev, .-_Z6updatev
.section .rodata.str1.1
.LC10:
.string "%6.4f "
.LC11:
.string "\n"
.text
.globl _Z10printfinalv
.type _Z10printfinalv, @function
_Z10printfinalv:
.LFB2062:
.cfi_startproc
endbr64
cmpl $0, tpoints(%rip)
jle .L49
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movl $1, %ebx
leaq values(%rip), %r12
leaq .LC10(%rip), %rbp
leaq .LC11(%rip), %r13
jmp .L46
.L45:
addq $1, %rbx
cmpl %ebx, tpoints(%rip)
jl .L52
.L46:
pxor %xmm0, %xmm0
cvtss2sd (%r12,%rbx,4), %xmm0
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movslq %ebx, %rax
imulq $1717986919, %rax, %rax
sarq $34, %rax
movl %ebx, %edx
sarl $31, %edx
subl %edx, %eax
leal (%rax,%rax,4), %eax
addl %eax, %eax
cmpl %ebx, %eax
jne .L45
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L45
.L52:
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L49:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
ret
.cfi_endproc
.LFE2062:
.size _Z10printfinalv, .-_Z10printfinalv
.section .rodata.str1.1
.LC12:
.string "right\n"
.LC13:
.string "%d are wrong\n"
.text
.globl _Z12check_answerv
.type _Z12check_answerv, @function
_Z12check_answerv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl tpoints(%rip), %eax
testl %eax, %eax
jle .L54
leal 1(%rax), %edi
movl $1, %eax
movl $0, %esi
movl $0, %r8d
leaq values(%rip), %rcx
leaq v(%rip), %rdx
movl $1, %r9d
jmp .L57
.L60:
addl $1, %esi
movl %r9d, %r8d
.L55:
addq $1, %rax
cmpq %rax, %rdi
je .L63
.L57:
movss (%rcx,%rax,4), %xmm0
ucomiss (%rdx,%rax,4), %xmm0
jp .L60
je .L55
jmp .L60
.L63:
testl %r8d, %r8d
je .L54
movl %esi, %edx
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L53
.L54:
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L53:
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _Z12check_answerv, .-_Z12check_answerv
.globl _Z40__device_stub__Z14do_math_kernelPfS_S_iiPfS_S_ii
.type _Z40__device_stub__Z14do_math_kernelPfS_S_iiPfS_S_ii, @function
_Z40__device_stub__Z14do_math_kernelPfS_S_iiPfS_S_ii:
.LFB2089:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L68
.L64:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L69
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L68:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14do_math_kernelPfS_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L64
.L69:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2089:
.size _Z40__device_stub__Z14do_math_kernelPfS_S_iiPfS_S_ii, .-_Z40__device_stub__Z14do_math_kernelPfS_S_iiPfS_S_ii
.globl _Z14do_math_kernelPfS_S_ii
.type _Z14do_math_kernelPfS_S_ii, @function
_Z14do_math_kernelPfS_S_ii:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z14do_math_kernelPfS_S_iiPfS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _Z14do_math_kernelPfS_S_ii, .-_Z14do_math_kernelPfS_S_ii
.globl _Z14updateOnDevicev
.type _Z14updateOnDevicev, @function
_Z14updateOnDevicev:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $64, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl tpoints(%rip), %eax
leal -8(,%rax,4), %ebx
movslq %ebx, %rbx
leaq 8(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
leaq 8+values(%rip), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 16(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
leaq 8+oldval(%rip), %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl tpoints(%rip), %edx
movl $1024, %eax
cmpl %eax, %edx
cmovle %edx, %eax
movl %eax, 32(%rsp)
movl $1, 36(%rsp)
leal 1023(%rdx), %eax
testl %edx, %edx
cmovns %edx, %eax
sarl $10, %eax
addl $1, %eax
movl %eax, 44(%rsp)
movl $1, 48(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 32(%rsp), %rdx
movl $1, %ecx
movq 44(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L76
.L73:
movl $2, %ecx
movq %rbx, %rdx
movq 8(%rsp), %rsi
leaq 8+values(%rip), %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L77
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L76:
.cfi_restore_state
movl tpoints(%rip), %r8d
movl nsteps(%rip), %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z40__device_stub__Z14do_math_kernelPfS_S_iiPfS_S_ii
jmp .L73
.L77:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size _Z14updateOnDevicev, .-_Z14updateOnDevicev
.section .rodata.str1.1
.LC14:
.string "%d"
.section .rodata.str1.8
.align 8
.LC15:
.string "Initializing points on the line...\n"
.align 8
.LC16:
.string "Updating all points for all time steps...\n"
.section .rodata.str1.1
.LC17:
.string "Printing final results...\n"
.LC18:
.string "\nDone.\n\n"
.text
.globl main
.type main, @function
main:
.LFB2064:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rsi, %rbx
movq 8(%rsi), %rdi
leaq tpoints(%rip), %rdx
leaq .LC14(%rip), %rbp
movq %rbp, %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
movq 16(%rbx), %rdi
leaq nsteps(%rip), %rdx
movq %rbp, %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
call _Z11check_paramv
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call _Z9init_linev
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call _Z14updateOnDevicev
leaq .LC17(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call _Z10printfinalv
leaq .LC18(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size main, .-main
.section .rodata.str1.1
.LC19:
.string "_Z14do_math_kernelPfS_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2092:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _Z14do_math_kernelPfS_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2092:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl n
.bss
.align 32
.type n, @object
.size n, 4000008
n:
.zero 4000008
.globl o
.align 32
.type o, @object
.size o, 4000008
o:
.zero 4000008
.globl v
.align 32
.type v, @object
.size v, 4000008
v:
.zero 4000008
.globl newval
.align 32
.type newval, @object
.size newval, 4000008
newval:
.zero 4000008
.globl oldval
.align 32
.type oldval, @object
.size oldval, 4000008
oldval:
.zero 4000008
.globl values
.align 32
.type values, @object
.size values, 4000008
values:
.zero 4000008
.globl rcode
.align 4
.type rcode, @object
.size rcode, 4
rcode:
.zero 4
.globl tpoints
.align 4
.type tpoints, @object
.size tpoints, 4
tpoints:
.zero 4
.globl nsteps
.align 4
.type nsteps, @object
.size nsteps, 4
nsteps:
.zero 4
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC7:
.long 1086918619
.align 4
.LC8:
.long 1065353216
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC9:
.long -2147483648
.long -1077474755
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "wave.hip"
.globl _Z11check_paramv # -- Begin function _Z11check_paramv
.p2align 4, 0x90
.type _Z11check_paramv,@function
_Z11check_paramv: # @_Z11check_paramv
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $24, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -24
.cfi_offset %rbp, -16
movl $-1000001, %ebp # imm = 0xFFF0BDBF
movl tpoints(%rip), %eax
addl %ebp, %eax
cmpl $-999982, %eax # imm = 0xFFF0BDD2
ja .LBB0_5
# %bb.1:
movq %rsp, %rbx
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_4: # in Loop: Header=BB0_2 Depth=1
movl tpoints(%rip), %eax
addl %ebp, %eax
cmpl $-999981, %eax # imm = 0xFFF0BDD3
jae .LBB0_5
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq __isoc23_scanf
movq %rbx, %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, tpoints(%rip)
addl $-1000001, %eax # imm = 0xFFF0BDBF
cmpl $-999982, %eax # imm = 0xFFF0BDD2
ja .LBB0_4
# %bb.3: # in Loop: Header=BB0_2 Depth=1
movl $.L.str.2, %edi
movl $20, %esi
movl $1000000, %edx # imm = 0xF4240
xorl %eax, %eax
callq printf
jmp .LBB0_4
.LBB0_5: # %.preheader
movl nsteps(%rip), %edx
leal -1000001(%rdx), %eax
cmpl $-1000001, %eax # imm = 0xFFF0BDBF
ja .LBB0_10
# %bb.6:
movq %rsp, %rbx
jmp .LBB0_7
.p2align 4, 0x90
.LBB0_9: # in Loop: Header=BB0_7 Depth=1
movl nsteps(%rip), %edx
leal -1000001(%rdx), %eax
cmpl $-1000000, %eax # imm = 0xFFF0BDC0
jae .LBB0_10
.LBB0_7: # %.lr.ph4
# =>This Inner Loop Header: Depth=1
movl $.L.str.3, %edi
movl $1000000, %esi # imm = 0xF4240
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq __isoc23_scanf
movq %rbx, %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, nsteps(%rip)
addl $-1000001, %eax # imm = 0xFFF0BDBF
cmpl $-1000001, %eax # imm = 0xFFF0BDBF
ja .LBB0_9
# %bb.8: # in Loop: Header=BB0_7 Depth=1
movl $.L.str.4, %edi
movl $1000000, %esi # imm = 0xF4240
xorl %eax, %eax
callq printf
jmp .LBB0_9
.LBB0_10: # %._crit_edge
movl tpoints(%rip), %esi
movl $.L.str.5, %edi
# kill: def $edx killed $edx killed $rdx
xorl %eax, %eax
callq printf
addq $24, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z11check_paramv, .Lfunc_end0-_Z11check_paramv
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z9init_linev
.LCPI1_0:
.long 0x40c90fdb # float 6.28318548
.LCPI1_1:
.long 0x3f800000 # float 1
.text
.globl _Z9init_linev
.p2align 4, 0x90
.type _Z9init_linev,@function
_Z9init_linev: # @_Z9init_linev
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $16, %rsp
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -16
movl tpoints(%rip), %eax
testl %eax, %eax
jle .LBB1_3
# %bb.1: # %.lr.ph.preheader
decl %eax
cvtsi2ss %eax, %xmm0
movss %xmm0, 12(%rsp) # 4-byte Spill
xorps %xmm0, %xmm0
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss %xmm0, 8(%rsp) # 4-byte Spill
movss 8(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
divss 12(%rsp), %xmm0 # 4-byte Folded Reload
mulss .LCPI1_0(%rip), %xmm0
callq sinf
movss %xmm0, values+4(,%rbx,4)
movss %xmm0, v+4(,%rbx,4)
movss 8(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
addss .LCPI1_1(%rip), %xmm0
movslq tpoints(%rip), %rax
incq %rbx
cmpq %rax, %rbx
jl .LBB1_2
.LBB1_3: # %.preheader
testl %eax, %eax
jle .LBB1_4
# %bb.5: # %.lr.ph23.preheader
movl %eax, %ebx
shlq $2, %rbx
movl $oldval+4, %edi
movl $values+4, %esi
movq %rbx, %rdx
callq memcpy@PLT
movl $o+4, %edi
movl $v+4, %esi
movq %rbx, %rdx
addq $16, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp memcpy@PLT # TAILCALL
.LBB1_4: # %._crit_edge
.cfi_def_cfa_offset 32
addq $16, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z9init_linev, .Lfunc_end1-_Z9init_linev
.cfi_endproc
# -- End function
.globl _Z29__device_stub__do_math_kernelPfS_S_ii # -- Begin function _Z29__device_stub__do_math_kernelPfS_S_ii
.p2align 4, 0x90
.type _Z29__device_stub__do_math_kernelPfS_S_ii,@function
_Z29__device_stub__do_math_kernelPfS_S_ii: # @_Z29__device_stub__do_math_kernelPfS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14do_math_kernelPfS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z29__device_stub__do_math_kernelPfS_S_ii, .Lfunc_end2-_Z29__device_stub__do_math_kernelPfS_S_ii
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z7do_mathi
.LCPI3_0:
.quad 0xbfc70a3d80000000 # double -0.18000000715255737
.text
.globl _Z7do_mathi
.p2align 4, 0x90
.type _Z7do_mathi,@function
_Z7do_mathi: # @_Z7do_mathi
.cfi_startproc
# %bb.0:
movslq %edi, %rax
movss values(,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movaps %xmm0, %xmm1
addsd %xmm0, %xmm1
movss oldval(,%rax,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
cvtss2sd %xmm2, %xmm2
subsd %xmm2, %xmm1
mulsd .LCPI3_0(%rip), %xmm0
addsd %xmm1, %xmm0
cvtsd2ss %xmm0, %xmm0
movss %xmm0, newval(,%rax,4)
retq
.Lfunc_end3:
.size _Z7do_mathi, .Lfunc_end3-_Z7do_mathi
.cfi_endproc
# -- End function
.globl _Z14updateOnDevicev # -- Begin function _Z14updateOnDevicev
.p2align 4, 0x90
.type _Z14updateOnDevicev,@function
_Z14updateOnDevicev: # @_Z14updateOnDevicev
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $160, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -16
movl tpoints(%rip), %eax
leal -8(,%rax,4), %eax
movslq %eax, %rbx
leaq 8(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movq 8(%rsp), %rdi
movl $values+8, %esi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 16(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movl $oldval+8, %esi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
leaq 32(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movl tpoints(%rip), %eax
leal 1023(%rax), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $10, %edi
incl %edi
cmpl $1024, %eax # imm = 0x400
movl $1024, %edx # imm = 0x400
cmovll %eax, %edx
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdx
orq %rax, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_2
# %bb.1:
movq 8(%rsp), %rax
movq 16(%rsp), %rcx
movq 32(%rsp), %rdx
movl nsteps(%rip), %esi
movl tpoints(%rip), %edi
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %esi, 28(%rsp)
movl %edi, 24(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 28(%rsp), %rax
movq %rax, 136(%rsp)
leaq 24(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z14do_math_kernelPfS_S_ii, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_2:
movq 8(%rsp), %rsi
movl $values+8, %edi
movq %rbx, %rdx
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
addq $160, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z14updateOnDevicev, .Lfunc_end4-_Z14updateOnDevicev
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z6updatev
.LCPI5_0:
.quad 0xbfc70a3d80000000 # double -0.18000000715255737
.text
.globl _Z6updatev
.p2align 4, 0x90
.type _Z6updatev,@function
_Z6updatev: # @_Z6updatev
.cfi_startproc
# %bb.0:
movl nsteps(%rip), %eax
testl %eax, %eax
jle .LBB5_12
# %bb.1: # %.preheader18.lr.ph
movl tpoints(%rip), %ecx
leal 1(%rcx), %edx
leaq -4(,%rcx,4), %rsi
leaq -4(,%rdx,4), %rdi
shlq $2, %rdx
movl $1, %r8d
movsd .LCPI5_0(%rip), %xmm0 # xmm0 = mem[0],zero
jmp .LBB5_2
.p2align 4, 0x90
.LBB5_11: # %._crit_edge
# in Loop: Header=BB5_2 Depth=1
leal 1(%r8), %r9d
cmpl %eax, %r8d
movl %r9d, %r8d
je .LBB5_12
.LBB5_2: # %.preheader18
# =>This Loop Header: Depth=1
# Child Loop BB5_4 Depth 2
# Child Loop BB5_10 Depth 2
testl %ecx, %ecx
jle .LBB5_8
# %bb.3: # %.lr.ph.preheader
# in Loop: Header=BB5_2 Depth=1
xorl %r9d, %r9d
jmp .LBB5_4
.p2align 4, 0x90
.LBB5_7: # in Loop: Header=BB5_4 Depth=2
movss %xmm1, newval+4(%r9)
addq $4, %r9
cmpq %r9, %rdi
je .LBB5_8
.LBB5_4: # %.lr.ph
# Parent Loop BB5_2 Depth=1
# => This Inner Loop Header: Depth=2
xorps %xmm1, %xmm1
testq %r9, %r9
je .LBB5_7
# %bb.5: # %.lr.ph
# in Loop: Header=BB5_4 Depth=2
cmpq %r9, %rsi
je .LBB5_7
# %bb.6: # in Loop: Header=BB5_4 Depth=2
movss values+4(%r9), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm1, %xmm1
movaps %xmm1, %xmm2
addsd %xmm1, %xmm2
movss oldval+4(%r9), %xmm3 # xmm3 = mem[0],zero,zero,zero
cvtss2sd %xmm3, %xmm3
subsd %xmm3, %xmm2
mulsd %xmm0, %xmm1
addsd %xmm2, %xmm1
cvtsd2ss %xmm1, %xmm1
jmp .LBB5_7
.p2align 4, 0x90
.LBB5_8: # %.preheader
# in Loop: Header=BB5_2 Depth=1
testl %ecx, %ecx
jle .LBB5_11
# %bb.9: # %.lr.ph23.preheader
# in Loop: Header=BB5_2 Depth=1
movl $4, %r9d
.p2align 4, 0x90
.LBB5_10: # %.lr.ph23
# Parent Loop BB5_2 Depth=1
# => This Inner Loop Header: Depth=2
movss values(%r9), %xmm1 # xmm1 = mem[0],zero,zero,zero
movss %xmm1, oldval(%r9)
movss newval(%r9), %xmm1 # xmm1 = mem[0],zero,zero,zero
movss %xmm1, values(%r9)
addq $4, %r9
cmpq %r9, %rdx
jne .LBB5_10
jmp .LBB5_11
.LBB5_12: # %._crit_edge26
retq
.Lfunc_end5:
.size _Z6updatev, .Lfunc_end5-_Z6updatev
.cfi_endproc
# -- End function
.globl _Z10printfinalv # -- Begin function _Z10printfinalv
.p2align 4, 0x90
.type _Z10printfinalv,@function
_Z10printfinalv: # @_Z10printfinalv
.cfi_startproc
# %bb.0:
cmpl $0, tpoints(%rip)
jle .LBB6_6
# %bb.1: # %.lr.ph.preheader
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $1, %ebx
xorl %r14d, %r14d
movl $3435973837, %r15d # imm = 0xCCCCCCCD
jmp .LBB6_2
.p2align 4, 0x90
.LBB6_4: # in Loop: Header=BB6_2 Depth=1
movslq tpoints(%rip), %rax
incq %r14
incl %ebx
cmpq %rax, %r14
jge .LBB6_5
.LBB6_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl %ebx, %eax
imulq %r15, %rax
shrq $35, %rax
leal (%rax,%rax,4), %eax
leal -1(,%rax,2), %ebp
movss values+4(,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.6, %edi
movb $1, %al
callq printf
cmpl %r14d, %ebp
jne .LBB6_4
# %bb.3: # in Loop: Header=BB6_2 Depth=1
movl $10, %edi
callq putchar@PLT
jmp .LBB6_4
.LBB6_5:
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.cfi_restore %rbp
.LBB6_6: # %._crit_edge
retq
.Lfunc_end6:
.size _Z10printfinalv, .Lfunc_end6-_Z10printfinalv
.cfi_endproc
# -- End function
.globl _Z12check_answerv # -- Begin function _Z12check_answerv
.p2align 4, 0x90
.type _Z12check_answerv,@function
_Z12check_answerv: # @_Z12check_answerv
.cfi_startproc
# %bb.0:
movl tpoints(%rip), %eax
testl %eax, %eax
jle .LBB7_1
# %bb.5: # %.lr.ph.preheader
leaq 4(,%rax,4), %rax
xorl %esi, %esi
movl $4, %ecx
movl $1, %edx
xorl %edi, %edi
.p2align 4, 0x90
.LBB7_6: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss values(%rcx), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss v(%rcx), %xmm1 # xmm1 = mem[0],zero,zero,zero
ucomiss %xmm1, %xmm0
cmpneqss %xmm1, %xmm0
cmovnel %edx, %edi
cmovpl %edx, %edi
movd %xmm0, %r8d
subl %r8d, %esi
addq $4, %rcx
cmpq %rcx, %rax
jne .LBB7_6
# %bb.2: # %._crit_edge.loopexit
testl %edi, %edi
sete %al
testb %al, %al
je .LBB7_7
.LBB7_4:
movl $.Lstr, %edi
jmp puts@PLT # TAILCALL
.LBB7_1:
xorl %esi, %esi
movb $1, %al
testb %al, %al
jne .LBB7_4
.LBB7_7:
movl $.L.str.9, %edi
xorl %eax, %eax
jmp printf # TAILCALL
.Lfunc_end7:
.size _Z12check_answerv, .Lfunc_end7-_Z12check_answerv
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI8_0:
.long 0x40c90fdb # float 6.28318548
.LCPI8_1:
.long 0x3f800000 # float 1
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movq 8(%rsi), %rdi
movl $.L.str.10, %esi
movl $tpoints, %edx
xorl %eax, %eax
callq __isoc23_sscanf
movq 16(%rbx), %rdi
movl $.L.str.10, %esi
movl $nsteps, %edx
xorl %eax, %eax
callq __isoc23_sscanf
callq _Z11check_paramv
movl $.Lstr.1, %edi
callq puts@PLT
movl tpoints(%rip), %eax
testl %eax, %eax
jle .LBB8_3
# %bb.1: # %.lr.ph.i.preheader
decl %eax
cvtsi2ss %eax, %xmm0
movss %xmm0, 4(%rsp) # 4-byte Spill
xorps %xmm0, %xmm0
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB8_2: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
movss %xmm0, (%rsp) # 4-byte Spill
movss (%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
divss 4(%rsp), %xmm0 # 4-byte Folded Reload
mulss .LCPI8_0(%rip), %xmm0
callq sinf
movss %xmm0, values+4(,%rbx,4)
movss %xmm0, v+4(,%rbx,4)
movss (%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
addss .LCPI8_1(%rip), %xmm0
movslq tpoints(%rip), %rax
incq %rbx
cmpq %rax, %rbx
jl .LBB8_2
.LBB8_3: # %.preheader.i
testl %eax, %eax
jle .LBB8_5
# %bb.4: # %.lr.ph23.preheader.i
movl %eax, %ebx
shlq $2, %rbx
movl $oldval+4, %edi
movl $values+4, %esi
movq %rbx, %rdx
callq memcpy@PLT
movl $o+4, %edi
movl $v+4, %esi
movq %rbx, %rdx
callq memcpy@PLT
.LBB8_5: # %_Z9init_linev.exit
movl $.Lstr.2, %edi
callq puts@PLT
callq _Z14updateOnDevicev
movl $.Lstr.3, %edi
callq puts@PLT
cmpl $0, tpoints(%rip)
jle .LBB8_10
# %bb.6: # %.lr.ph.i5.preheader
movl $1, %ebx
xorl %r14d, %r14d
movl $3435973837, %r15d # imm = 0xCCCCCCCD
jmp .LBB8_7
.p2align 4, 0x90
.LBB8_9: # in Loop: Header=BB8_7 Depth=1
movslq tpoints(%rip), %rax
incq %r14
incl %ebx
cmpq %rax, %r14
jge .LBB8_10
.LBB8_7: # %.lr.ph.i5
# =>This Inner Loop Header: Depth=1
movl %ebx, %eax
imulq %r15, %rax
shrq $35, %rax
leal (%rax,%rax,4), %eax
leal -1(,%rax,2), %ebp
movss values+4(,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.6, %edi
movb $1, %al
callq printf
cmpl %r14d, %ebp
jne .LBB8_9
# %bb.8: # in Loop: Header=BB8_7 Depth=1
movl $10, %edi
callq putchar@PLT
jmp .LBB8_9
.LBB8_10: # %_Z10printfinalv.exit
movl $.Lstr.4, %edi
callq puts@PLT
xorl %eax, %eax
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end8:
.size main, .Lfunc_end8-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB9_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB9_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14do_math_kernelPfS_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end9:
.size __hip_module_ctor, .Lfunc_end9-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB10_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB10_2:
retq
.Lfunc_end10:
.size __hip_module_dtor, .Lfunc_end10-__hip_module_dtor
.cfi_endproc
# -- End function
.type nsteps,@object # @nsteps
.bss
.globl nsteps
.p2align 2, 0x0
nsteps:
.long 0 # 0x0
.size nsteps, 4
.type tpoints,@object # @tpoints
.globl tpoints
.p2align 2, 0x0
tpoints:
.long 0 # 0x0
.size tpoints, 4
.type rcode,@object # @rcode
.globl rcode
.p2align 2, 0x0
rcode:
.long 0 # 0x0
.size rcode, 4
.type values,@object # @values
.globl values
.p2align 4, 0x0
values:
.zero 4000008
.size values, 4000008
.type oldval,@object # @oldval
.globl oldval
.p2align 4, 0x0
oldval:
.zero 4000008
.size oldval, 4000008
.type newval,@object # @newval
.globl newval
.p2align 4, 0x0
newval:
.zero 4000008
.size newval, 4000008
.type v,@object # @v
.globl v
.p2align 4, 0x0
v:
.zero 4000008
.size v, 4000008
.type o,@object # @o
.globl o
.p2align 4, 0x0
o:
.zero 4000008
.size o, 4000008
.type n,@object # @n
.globl n
.p2align 4, 0x0
n:
.zero 4000008
.size n, 4000008
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Enter number of points along vibrating string [%d-%d]: "
.size .L.str, 56
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%s"
.size .L.str.1, 3
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Invalid. Please enter value between %d and %d\n"
.size .L.str.2, 47
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Enter number of time steps [1-%d]: "
.size .L.str.3, 36
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Invalid. Please enter value between 1 and %d\n"
.size .L.str.4, 46
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Using points = %d, steps = %d\n"
.size .L.str.5, 31
.type _Z14do_math_kernelPfS_S_ii,@object # @_Z14do_math_kernelPfS_S_ii
.section .rodata,"a",@progbits
.globl _Z14do_math_kernelPfS_S_ii
.p2align 3, 0x0
_Z14do_math_kernelPfS_S_ii:
.quad _Z29__device_stub__do_math_kernelPfS_S_ii
.size _Z14do_math_kernelPfS_S_ii, 8
.type .L.str.6,@object # @.str.6
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.6:
.asciz "%6.4f "
.size .L.str.6, 7
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "%d are wrong\n"
.size .L.str.9, 14
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "%d"
.size .L.str.10, 3
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z14do_math_kernelPfS_S_ii"
.size .L__unnamed_1, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "right"
.size .Lstr, 6
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Initializing points on the line..."
.size .Lstr.1, 35
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "Updating all points for all time steps..."
.size .Lstr.2, 42
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Printing final results..."
.size .Lstr.3, 26
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "\nDone.\n"
.size .Lstr.4, 8
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__do_math_kernelPfS_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym nsteps
.addrsig_sym tpoints
.addrsig_sym values
.addrsig_sym oldval
.addrsig_sym _Z14do_math_kernelPfS_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <assert.h>
#include <string.h>
/*
#define IMG_DIMENSION 32
#define N_IMG_PAIRS 10000
#define NREQUESTS 6
#define N_STREMS 64
#define HIST_SIZE 256
typedef unsigned char uchar;
#define OUT
#define CUDA_CHECK(f) do { \
cudaError_t e = f; \
if (e != cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
exit(1); \
} \
} while (0)
#define SQR(a) ((a) * (a))
#define QUEUE_SIZE 10
__device__ __host__ bool is_in_image_bounds(int i, int j) {
return (i >= 0) && (i < IMG_DIMENSION) && (j >= 0) && (j < IMG_DIMENSION);
}
__device__ __host__ uchar local_binary_pattern(uchar *image, int i, int j) {
uchar center = image[i * IMG_DIMENSION + j];
uchar pattern = 0;
if (is_in_image_bounds(i - 1, j - 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j - 1)] >= center) << 7;
if (is_in_image_bounds(i - 1, j )) pattern |= (image[(i - 1) * IMG_DIMENSION + (j )] >= center) << 6;
if (is_in_image_bounds(i - 1, j + 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j + 1)] >= center) << 5;
if (is_in_image_bounds(i , j + 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j + 1)] >= center) << 4;
if (is_in_image_bounds(i + 1, j + 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j + 1)] >= center) << 3;
if (is_in_image_bounds(i + 1, j )) pattern |= (image[(i + 1) * IMG_DIMENSION + (j )] >= center) << 2;
if (is_in_image_bounds(i + 1, j - 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j - 1)] >= center) << 1;
if (is_in_image_bounds(i , j - 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j - 1)] >= center) << 0;
return pattern;
}
__device__ void gpu_image_to_histogram(uchar *image, int *histogram) {
uchar pattern = local_binary_pattern(image, threadIdx.x / IMG_DIMENSION, threadIdx.x % IMG_DIMENSION);
atomicAdd(&histogram[pattern], 1);
}
__device__ void gpu_histogram_distance(int *h1, int *h2, double *distance) {
int length = 256;
int tid = threadIdx.x;
if(tid<length){
distance[tid] = 0;
if (h1[tid] + h2[tid] != 0) {
distance[tid] = ((double)SQR(h1[tid] - h2[tid])) / (h1[tid] + h2[tid]);
}
h1[tid] = h2[tid]=0;
}
__syncthreads();
while (length > 1) {
if (threadIdx.x < length / 2) {
distance[tid] = distance[tid] + distance[tid + length / 2];
}
length /= 2;
__syncthreads();
}
}
void image_to_histogram(uchar *image, int *histogram) {
memset(histogram, 0, sizeof(int) * 256);
for (int i = 0; i < IMG_DIMENSION; i++) {
for (int j = 0; j < IMG_DIMENSION; j++) {
uchar pattern = local_binary_pattern(image, i, j);
histogram[pattern]++;
}
}
}
double histogram_distance(int *h1, int *h2) {
/* we'll use the chi-square distance *//*
double distance = 0;
for (int i = 0; i < 256; i++) {
if (h1[i] + h2[i] != 0) {
distance += ((double)SQR(h1[i] - h2[i])) / (h1[i] + h2[i]);
}
}
return distance;
}
double static inline get_time_msec(void) {
struct timeval t;
gettimeofday(&t, NULL);
return t.tv_sec * 1e+3 + t.tv_usec * 1e-3;
}
/* we won't load actual files. just fill the images with random bytes *//*
void load_image_pairs(uchar *images1, uchar *images2) {
srand(0);
for (int i = 0; i < N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION; i++) {
images1[i] = rand() % 256;
images2[i] = rand() % 256;
}
}
/*************************************************/
/*******CLASS***producer***consumer****queue******/
/*************************************************//*
class cpu2gpuQueue {
public:
cpu2gpuQueue():size(QUEUE_SIZE),head(0),tail(0){/*printf("head=%d\tsize=%d\n",head,size)*//*;}
~cpu2gpuQueue(){}
__device__ __host__ cpu2gpuQueue& operator=(const cpu2gpuQueue& rhs);
__host__ int produce(uchar* imag1,uchar* imag2);
__device__ int consume(uchar* images);
private:
volatile int size;
volatile int head;
volatile int tail;
uchar q[QUEUE_SIZE*SQR(IMG_DIMENSION)];
};
__device__ __host__ cpu2gpuQueue& cpu2gpuQueue::operator=(const cpu2gpuQueue& rhs)
{
this->head=rhs.head;
this->size=rhs.size;
this->tail=rhs.tail;
memcpy(this->q,rhs.q,QUEUE_SIZE*SQR(IMG_DIMENSION)*sizeof(*rhs.q));
return *this;
}
__device__ int cpu2gpuQueue::consume(uchar* images)
{
if(!threadIdx.x)
{
printf("cpu2gpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
}
if(!(tail<head))return 0;
/*int i;
for(i=threadIdx.x;i<2*SQR(IMG_DIMENSION);i+=gridDim.x)
images[i]=q[(tail%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)+i];*//*
//make sure all threads copied before increasing the value of tail
__syncthreads();
if(!threadIdx.x)
{
size++;
tail++;
printf("gpu size=%d\n",size);
__threadfence_system();
}
// __syncthreads();
return 1;
}
__host__ int cpu2gpuQueue::produce(uchar* imag1,uchar* imag2)
{
if(!(head<size)){
//printf("head=%d\tsize=%d\ttrue\n",head,size);
return 0;
}
memcpy(&q[(head%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)],imag1,SQR(IMG_DIMENSION)*sizeof(uchar));
memcpy(&q[(head%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)+SQR(IMG_DIMENSION)],imag2,SQR(IMG_DIMENSION)*sizeof(uchar));
head++;
return 1;
}
class gpu2cpuQueue {
public:
gpu2cpuQueue():size(QUEUE_SIZE),head(0),tail(0){}
~gpu2cpuQueue(){}
__device__ __host__ gpu2cpuQueue& operator=(const gpu2cpuQueue& rhs);
__device__ int produce(double distance);
__host__ int consume(double* distance);
void testadd(){size++;}
void test(){
for(int i=0;i<QUEUE_SIZE;i++)
printf("%f\t",q[i]);
printf("\n");
}
private:
volatile int size;
volatile int head;
volatile int tail;
double q[QUEUE_SIZE];
};
__device__ __host__ gpu2cpuQueue& gpu2cpuQueue::operator=(const gpu2cpuQueue& rhs)
{
this->head=rhs.head;
this->size=rhs.size;
this->tail=rhs.tail;
memcpy(this->q,rhs.q,QUEUE_SIZE*sizeof(*rhs.q));
return *this;
}
static int x=0;
__host__ int gpu2cpuQueue::consume(double* distance)
{
if((!(x%100))&&x<500){
printf("gpu2cpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
}
x++;
if(!(tail<head))return 0;
*distance=q[(tail%QUEUE_SIZE)];
printf("gpu2cpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
printf("distance=%f\n",*distance);
printf("size=%d\n",size);
size++;
tail++;
return 1;
}
__device__ int gpu2cpuQueue::produce(double distance)
{
if(!(head<size)) return 0;
if(threadIdx.x) return 1;
printf("gpu size=%d\t(head%QUEUE_SIZE)=%d\n",size,(head%QUEUE_SIZE));
q[(head%QUEUE_SIZE)]=distance;
//printf("before\n");
//printf("distance=%f\n",distance);
__threadfence_system();
//printf("after\n");
head++;
__threadfence_system();
return 1;
}
struct QP{
cpu2gpuQueue cpugpu;
gpu2cpuQueue gpucpu;
};
__global__ void test(struct QP* Ptr){
int i;
if(!threadIdx.x) printf("test kernel\n");
__shared__ uchar images[2*SQR(IMG_DIMENSION)];
__shared__ int hist1[HIST_SIZE],hist2[HIST_SIZE];
__shared__ double distance[HIST_SIZE];
//if(threadIdx.x<HIST_SIZE)
//hist1[threadIdx.x]=hist2[threadIdx.x]=0;
//if(!threadIdx.x) printf("test kernel\n");
for(int i=0;i<NREQUESTS;i++)
{
//if(!threadIdx.x)printf("gpu loop i=%d\n",i);
while(!Ptr->cpugpu.consume(images));
if(!threadIdx.x)printf("gpu loop i=%d\n",i);
//gpu_image_to_histogram(images,hist1);
//gpu_image_to_histogram(images+SQR(IMG_DIMENSION),hist2);
//__syncthreads();
//gpu_histogram_distance(hist1,hist2,distance);
//while(!Ptr->gpucpu.produce(distance[0]));
while(!Ptr->gpucpu.produce((double)i));
__syncthreads();
}
}
__global__ void test1(int* Ptr)
{
int i;
for(int i=0;i<NREQUESTS;++i)
Ptr[i]=i;
printf("kernel finish\n");
return;
}
int main(void) {
uchar *images1;
uchar *images2;
CUDA_CHECK( cudaHostAlloc(&images1, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
CUDA_CHECK( cudaHostAlloc(&images2, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
load_image_pairs(images1, images2);
double t_start, t_finish;
double total_distance=0,distance=0;
int i=NREQUESTS,finished=0;
struct QP *cpuqp,*gpuqp;
CUDA_CHECK( cudaHostAlloc(&cpuqp, sizeof(struct QP), cudaHostAllocWriteCombined) );
cpuqp->cpugpu=cpu2gpuQueue();
cpuqp->gpucpu=gpu2cpuQueue();
CUDA_CHECK( cudaHostGetDevicePointer(&gpuqp,cpuqp,0) );
printf("\n=== CPU ===\n");
int histogram1[256];
int histogram2[256];
t_start = get_time_msec();
for (int i = 0; i < NREQUESTS; i++) {
int img_idx = i % N_IMG_PAIRS;
image_to_histogram(&images1[img_idx * IMG_DIMENSION * IMG_DIMENSION], histogram1);
image_to_histogram(&images2[img_idx * IMG_DIMENSION * IMG_DIMENSION], histogram2);
total_distance += histogram_distance(histogram1, histogram2);
}
t_finish = get_time_msec();
printf("average distance between images %f\n", total_distance / NREQUESTS);
printf("throughput = %lf (req/sec)\n", NREQUESTS / (t_finish - t_start) * 1e+3);
total_distance=0;
test<<<1, 1024>>>(gpuqp);
cpuqp->gpucpu.testadd();
//printf("after\n");
for(int i=0;i<NREQUESTS;i++)
{
printf("cpu loop i=%d\n",i);
distance=0;
if(cpuqp->gpucpu.consume(&distance))
{
//printf("distance=%f\n",distance);
total_distance+=distance;
finished++;
printf("finished=%d\n",finished);
}
int img_idx = i % N_IMG_PAIRS,j;
//for(j=0;j<SQR(IMG_DIMENSION);++j)printf("%d%d",images1[img_idx * IMG_DIMENSION * IMG_DIMENSION+j],images2[img_idx * IMG_DIMENSION * IMG_DIMENSION+j]);
//printf("\n");
while(!cpuqp->cpugpu.produce(&images1[img_idx * IMG_DIMENSION * IMG_DIMENSION],&images2[img_idx * IMG_DIMENSION * IMG_DIMENSION]));
cpuqp->gpucpu.test();
}
printf("finish loop\n");
/*while(finished<NREQUESTS)
{
if(cpuqp->gpucpu.consume(&distance))
{
//printf("distance=%f\n",distance);
total_distance+=distance;
finished++;
printf("finished=%d\n",finished);
}
}*//*
printf("finish loop 2\n");
CUDA_CHECK( cudaDeviceSynchronize());
cpuqp->gpucpu.test();
printf("average distance between images %f\n", total_distance / NREQUESTS);
return 0;
}*/ | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <assert.h>
#include <string.h>
/*
#define IMG_DIMENSION 32
#define N_IMG_PAIRS 10000
#define NREQUESTS 6
#define N_STREMS 64
#define HIST_SIZE 256
typedef unsigned char uchar;
#define OUT
#define CUDA_CHECK(f) do { \
cudaError_t e = f; \
if (e != cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
exit(1); \
} \
} while (0)
#define SQR(a) ((a) * (a))
#define QUEUE_SIZE 10
__device__ __host__ bool is_in_image_bounds(int i, int j) {
return (i >= 0) && (i < IMG_DIMENSION) && (j >= 0) && (j < IMG_DIMENSION);
}
__device__ __host__ uchar local_binary_pattern(uchar *image, int i, int j) {
uchar center = image[i * IMG_DIMENSION + j];
uchar pattern = 0;
if (is_in_image_bounds(i - 1, j - 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j - 1)] >= center) << 7;
if (is_in_image_bounds(i - 1, j )) pattern |= (image[(i - 1) * IMG_DIMENSION + (j )] >= center) << 6;
if (is_in_image_bounds(i - 1, j + 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j + 1)] >= center) << 5;
if (is_in_image_bounds(i , j + 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j + 1)] >= center) << 4;
if (is_in_image_bounds(i + 1, j + 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j + 1)] >= center) << 3;
if (is_in_image_bounds(i + 1, j )) pattern |= (image[(i + 1) * IMG_DIMENSION + (j )] >= center) << 2;
if (is_in_image_bounds(i + 1, j - 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j - 1)] >= center) << 1;
if (is_in_image_bounds(i , j - 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j - 1)] >= center) << 0;
return pattern;
}
__device__ void gpu_image_to_histogram(uchar *image, int *histogram) {
uchar pattern = local_binary_pattern(image, threadIdx.x / IMG_DIMENSION, threadIdx.x % IMG_DIMENSION);
atomicAdd(&histogram[pattern], 1);
}
__device__ void gpu_histogram_distance(int *h1, int *h2, double *distance) {
int length = 256;
int tid = threadIdx.x;
if(tid<length){
distance[tid] = 0;
if (h1[tid] + h2[tid] != 0) {
distance[tid] = ((double)SQR(h1[tid] - h2[tid])) / (h1[tid] + h2[tid]);
}
h1[tid] = h2[tid]=0;
}
__syncthreads();
while (length > 1) {
if (threadIdx.x < length / 2) {
distance[tid] = distance[tid] + distance[tid + length / 2];
}
length /= 2;
__syncthreads();
}
}
void image_to_histogram(uchar *image, int *histogram) {
memset(histogram, 0, sizeof(int) * 256);
for (int i = 0; i < IMG_DIMENSION; i++) {
for (int j = 0; j < IMG_DIMENSION; j++) {
uchar pattern = local_binary_pattern(image, i, j);
histogram[pattern]++;
}
}
}
double histogram_distance(int *h1, int *h2) {
/* we'll use the chi-square distance *//*
double distance = 0;
for (int i = 0; i < 256; i++) {
if (h1[i] + h2[i] != 0) {
distance += ((double)SQR(h1[i] - h2[i])) / (h1[i] + h2[i]);
}
}
return distance;
}
double static inline get_time_msec(void) {
struct timeval t;
gettimeofday(&t, NULL);
return t.tv_sec * 1e+3 + t.tv_usec * 1e-3;
}
/* we won't load actual files. just fill the images with random bytes *//*
void load_image_pairs(uchar *images1, uchar *images2) {
srand(0);
for (int i = 0; i < N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION; i++) {
images1[i] = rand() % 256;
images2[i] = rand() % 256;
}
}
/*************************************************/
/*******CLASS***producer***consumer****queue******/
/*************************************************//*
class cpu2gpuQueue {
public:
cpu2gpuQueue():size(QUEUE_SIZE),head(0),tail(0){/*printf("head=%d\tsize=%d\n",head,size)*//*;}
~cpu2gpuQueue(){}
__device__ __host__ cpu2gpuQueue& operator=(const cpu2gpuQueue& rhs);
__host__ int produce(uchar* imag1,uchar* imag2);
__device__ int consume(uchar* images);
private:
volatile int size;
volatile int head;
volatile int tail;
uchar q[QUEUE_SIZE*SQR(IMG_DIMENSION)];
};
__device__ __host__ cpu2gpuQueue& cpu2gpuQueue::operator=(const cpu2gpuQueue& rhs)
{
this->head=rhs.head;
this->size=rhs.size;
this->tail=rhs.tail;
memcpy(this->q,rhs.q,QUEUE_SIZE*SQR(IMG_DIMENSION)*sizeof(*rhs.q));
return *this;
}
__device__ int cpu2gpuQueue::consume(uchar* images)
{
if(!threadIdx.x)
{
printf("cpu2gpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
}
if(!(tail<head))return 0;
/*int i;
for(i=threadIdx.x;i<2*SQR(IMG_DIMENSION);i+=gridDim.x)
images[i]=q[(tail%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)+i];*//*
//make sure all threads copied before increasing the value of tail
__syncthreads();
if(!threadIdx.x)
{
size++;
tail++;
printf("gpu size=%d\n",size);
__threadfence_system();
}
// __syncthreads();
return 1;
}
__host__ int cpu2gpuQueue::produce(uchar* imag1,uchar* imag2)
{
if(!(head<size)){
//printf("head=%d\tsize=%d\ttrue\n",head,size);
return 0;
}
memcpy(&q[(head%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)],imag1,SQR(IMG_DIMENSION)*sizeof(uchar));
memcpy(&q[(head%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)+SQR(IMG_DIMENSION)],imag2,SQR(IMG_DIMENSION)*sizeof(uchar));
head++;
return 1;
}
class gpu2cpuQueue {
public:
gpu2cpuQueue():size(QUEUE_SIZE),head(0),tail(0){}
~gpu2cpuQueue(){}
__device__ __host__ gpu2cpuQueue& operator=(const gpu2cpuQueue& rhs);
__device__ int produce(double distance);
__host__ int consume(double* distance);
void testadd(){size++;}
void test(){
for(int i=0;i<QUEUE_SIZE;i++)
printf("%f\t",q[i]);
printf("\n");
}
private:
volatile int size;
volatile int head;
volatile int tail;
double q[QUEUE_SIZE];
};
__device__ __host__ gpu2cpuQueue& gpu2cpuQueue::operator=(const gpu2cpuQueue& rhs)
{
this->head=rhs.head;
this->size=rhs.size;
this->tail=rhs.tail;
memcpy(this->q,rhs.q,QUEUE_SIZE*sizeof(*rhs.q));
return *this;
}
static int x=0;
__host__ int gpu2cpuQueue::consume(double* distance)
{
if((!(x%100))&&x<500){
printf("gpu2cpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
}
x++;
if(!(tail<head))return 0;
*distance=q[(tail%QUEUE_SIZE)];
printf("gpu2cpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
printf("distance=%f\n",*distance);
printf("size=%d\n",size);
size++;
tail++;
return 1;
}
__device__ int gpu2cpuQueue::produce(double distance)
{
if(!(head<size)) return 0;
if(threadIdx.x) return 1;
printf("gpu size=%d\t(head%QUEUE_SIZE)=%d\n",size,(head%QUEUE_SIZE));
q[(head%QUEUE_SIZE)]=distance;
//printf("before\n");
//printf("distance=%f\n",distance);
__threadfence_system();
//printf("after\n");
head++;
__threadfence_system();
return 1;
}
struct QP{
cpu2gpuQueue cpugpu;
gpu2cpuQueue gpucpu;
};
__global__ void test(struct QP* Ptr){
int i;
if(!threadIdx.x) printf("test kernel\n");
__shared__ uchar images[2*SQR(IMG_DIMENSION)];
__shared__ int hist1[HIST_SIZE],hist2[HIST_SIZE];
__shared__ double distance[HIST_SIZE];
//if(threadIdx.x<HIST_SIZE)
//hist1[threadIdx.x]=hist2[threadIdx.x]=0;
//if(!threadIdx.x) printf("test kernel\n");
for(int i=0;i<NREQUESTS;i++)
{
//if(!threadIdx.x)printf("gpu loop i=%d\n",i);
while(!Ptr->cpugpu.consume(images));
if(!threadIdx.x)printf("gpu loop i=%d\n",i);
//gpu_image_to_histogram(images,hist1);
//gpu_image_to_histogram(images+SQR(IMG_DIMENSION),hist2);
//__syncthreads();
//gpu_histogram_distance(hist1,hist2,distance);
//while(!Ptr->gpucpu.produce(distance[0]));
while(!Ptr->gpucpu.produce((double)i));
__syncthreads();
}
}
__global__ void test1(int* Ptr)
{
int i;
for(int i=0;i<NREQUESTS;++i)
Ptr[i]=i;
printf("kernel finish\n");
return;
}
int main(void) {
uchar *images1;
uchar *images2;
CUDA_CHECK( cudaHostAlloc(&images1, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
CUDA_CHECK( cudaHostAlloc(&images2, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
load_image_pairs(images1, images2);
double t_start, t_finish;
double total_distance=0,distance=0;
int i=NREQUESTS,finished=0;
struct QP *cpuqp,*gpuqp;
CUDA_CHECK( cudaHostAlloc(&cpuqp, sizeof(struct QP), cudaHostAllocWriteCombined) );
cpuqp->cpugpu=cpu2gpuQueue();
cpuqp->gpucpu=gpu2cpuQueue();
CUDA_CHECK( cudaHostGetDevicePointer(&gpuqp,cpuqp,0) );
printf("\n=== CPU ===\n");
int histogram1[256];
int histogram2[256];
t_start = get_time_msec();
for (int i = 0; i < NREQUESTS; i++) {
int img_idx = i % N_IMG_PAIRS;
image_to_histogram(&images1[img_idx * IMG_DIMENSION * IMG_DIMENSION], histogram1);
image_to_histogram(&images2[img_idx * IMG_DIMENSION * IMG_DIMENSION], histogram2);
total_distance += histogram_distance(histogram1, histogram2);
}
t_finish = get_time_msec();
printf("average distance between images %f\n", total_distance / NREQUESTS);
printf("throughput = %lf (req/sec)\n", NREQUESTS / (t_finish - t_start) * 1e+3);
total_distance=0;
test<<<1, 1024>>>(gpuqp);
cpuqp->gpucpu.testadd();
//printf("after\n");
for(int i=0;i<NREQUESTS;i++)
{
printf("cpu loop i=%d\n",i);
distance=0;
if(cpuqp->gpucpu.consume(&distance))
{
//printf("distance=%f\n",distance);
total_distance+=distance;
finished++;
printf("finished=%d\n",finished);
}
int img_idx = i % N_IMG_PAIRS,j;
//for(j=0;j<SQR(IMG_DIMENSION);++j)printf("%d%d",images1[img_idx * IMG_DIMENSION * IMG_DIMENSION+j],images2[img_idx * IMG_DIMENSION * IMG_DIMENSION+j]);
//printf("\n");
while(!cpuqp->cpugpu.produce(&images1[img_idx * IMG_DIMENSION * IMG_DIMENSION],&images2[img_idx * IMG_DIMENSION * IMG_DIMENSION]));
cpuqp->gpucpu.test();
}
printf("finish loop\n");
/*while(finished<NREQUESTS)
{
if(cpuqp->gpucpu.consume(&distance))
{
//printf("distance=%f\n",distance);
total_distance+=distance;
finished++;
printf("finished=%d\n",finished);
}
}*//*
printf("finish loop 2\n");
CUDA_CHECK( cudaDeviceSynchronize());
cpuqp->gpucpu.test();
printf("average distance between images %f\n", total_distance / NREQUESTS);
return 0;
}*/ | .file "tmpxft_0012321c_00000000-6_cpugpuqueue.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2072:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2072:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2095:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2095:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <assert.h>
#include <string.h>
/*
#define IMG_DIMENSION 32
#define N_IMG_PAIRS 10000
#define NREQUESTS 6
#define N_STREMS 64
#define HIST_SIZE 256
typedef unsigned char uchar;
#define OUT
#define CUDA_CHECK(f) do { \
cudaError_t e = f; \
if (e != cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
exit(1); \
} \
} while (0)
#define SQR(a) ((a) * (a))
#define QUEUE_SIZE 10
__device__ __host__ bool is_in_image_bounds(int i, int j) {
return (i >= 0) && (i < IMG_DIMENSION) && (j >= 0) && (j < IMG_DIMENSION);
}
__device__ __host__ uchar local_binary_pattern(uchar *image, int i, int j) {
uchar center = image[i * IMG_DIMENSION + j];
uchar pattern = 0;
if (is_in_image_bounds(i - 1, j - 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j - 1)] >= center) << 7;
if (is_in_image_bounds(i - 1, j )) pattern |= (image[(i - 1) * IMG_DIMENSION + (j )] >= center) << 6;
if (is_in_image_bounds(i - 1, j + 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j + 1)] >= center) << 5;
if (is_in_image_bounds(i , j + 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j + 1)] >= center) << 4;
if (is_in_image_bounds(i + 1, j + 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j + 1)] >= center) << 3;
if (is_in_image_bounds(i + 1, j )) pattern |= (image[(i + 1) * IMG_DIMENSION + (j )] >= center) << 2;
if (is_in_image_bounds(i + 1, j - 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j - 1)] >= center) << 1;
if (is_in_image_bounds(i , j - 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j - 1)] >= center) << 0;
return pattern;
}
__device__ void gpu_image_to_histogram(uchar *image, int *histogram) {
uchar pattern = local_binary_pattern(image, threadIdx.x / IMG_DIMENSION, threadIdx.x % IMG_DIMENSION);
atomicAdd(&histogram[pattern], 1);
}
__device__ void gpu_histogram_distance(int *h1, int *h2, double *distance) {
int length = 256;
int tid = threadIdx.x;
if(tid<length){
distance[tid] = 0;
if (h1[tid] + h2[tid] != 0) {
distance[tid] = ((double)SQR(h1[tid] - h2[tid])) / (h1[tid] + h2[tid]);
}
h1[tid] = h2[tid]=0;
}
__syncthreads();
while (length > 1) {
if (threadIdx.x < length / 2) {
distance[tid] = distance[tid] + distance[tid + length / 2];
}
length /= 2;
__syncthreads();
}
}
void image_to_histogram(uchar *image, int *histogram) {
memset(histogram, 0, sizeof(int) * 256);
for (int i = 0; i < IMG_DIMENSION; i++) {
for (int j = 0; j < IMG_DIMENSION; j++) {
uchar pattern = local_binary_pattern(image, i, j);
histogram[pattern]++;
}
}
}
double histogram_distance(int *h1, int *h2) {
/* we'll use the chi-square distance *//*
double distance = 0;
for (int i = 0; i < 256; i++) {
if (h1[i] + h2[i] != 0) {
distance += ((double)SQR(h1[i] - h2[i])) / (h1[i] + h2[i]);
}
}
return distance;
}
double static inline get_time_msec(void) {
struct timeval t;
gettimeofday(&t, NULL);
return t.tv_sec * 1e+3 + t.tv_usec * 1e-3;
}
/* we won't load actual files. just fill the images with random bytes *//*
void load_image_pairs(uchar *images1, uchar *images2) {
srand(0);
for (int i = 0; i < N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION; i++) {
images1[i] = rand() % 256;
images2[i] = rand() % 256;
}
}
/*************************************************/
/*******CLASS***producer***consumer****queue******/
/*************************************************//*
class cpu2gpuQueue {
public:
cpu2gpuQueue():size(QUEUE_SIZE),head(0),tail(0){/*printf("head=%d\tsize=%d\n",head,size)*//*;}
~cpu2gpuQueue(){}
__device__ __host__ cpu2gpuQueue& operator=(const cpu2gpuQueue& rhs);
__host__ int produce(uchar* imag1,uchar* imag2);
__device__ int consume(uchar* images);
private:
volatile int size;
volatile int head;
volatile int tail;
uchar q[QUEUE_SIZE*SQR(IMG_DIMENSION)];
};
__device__ __host__ cpu2gpuQueue& cpu2gpuQueue::operator=(const cpu2gpuQueue& rhs)
{
this->head=rhs.head;
this->size=rhs.size;
this->tail=rhs.tail;
memcpy(this->q,rhs.q,QUEUE_SIZE*SQR(IMG_DIMENSION)*sizeof(*rhs.q));
return *this;
}
__device__ int cpu2gpuQueue::consume(uchar* images)
{
if(!threadIdx.x)
{
printf("cpu2gpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
}
if(!(tail<head))return 0;
/*int i;
for(i=threadIdx.x;i<2*SQR(IMG_DIMENSION);i+=gridDim.x)
images[i]=q[(tail%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)+i];*//*
//make sure all threads copied before increasing the value of tail
__syncthreads();
if(!threadIdx.x)
{
size++;
tail++;
printf("gpu size=%d\n",size);
__threadfence_system();
}
// __syncthreads();
return 1;
}
__host__ int cpu2gpuQueue::produce(uchar* imag1,uchar* imag2)
{
if(!(head<size)){
//printf("head=%d\tsize=%d\ttrue\n",head,size);
return 0;
}
memcpy(&q[(head%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)],imag1,SQR(IMG_DIMENSION)*sizeof(uchar));
memcpy(&q[(head%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)+SQR(IMG_DIMENSION)],imag2,SQR(IMG_DIMENSION)*sizeof(uchar));
head++;
return 1;
}
class gpu2cpuQueue {
public:
gpu2cpuQueue():size(QUEUE_SIZE),head(0),tail(0){}
~gpu2cpuQueue(){}
__device__ __host__ gpu2cpuQueue& operator=(const gpu2cpuQueue& rhs);
__device__ int produce(double distance);
__host__ int consume(double* distance);
void testadd(){size++;}
void test(){
for(int i=0;i<QUEUE_SIZE;i++)
printf("%f\t",q[i]);
printf("\n");
}
private:
volatile int size;
volatile int head;
volatile int tail;
double q[QUEUE_SIZE];
};
__device__ __host__ gpu2cpuQueue& gpu2cpuQueue::operator=(const gpu2cpuQueue& rhs)
{
this->head=rhs.head;
this->size=rhs.size;
this->tail=rhs.tail;
memcpy(this->q,rhs.q,QUEUE_SIZE*sizeof(*rhs.q));
return *this;
}
static int x=0;
__host__ int gpu2cpuQueue::consume(double* distance)
{
if((!(x%100))&&x<500){
printf("gpu2cpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
}
x++;
if(!(tail<head))return 0;
*distance=q[(tail%QUEUE_SIZE)];
printf("gpu2cpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
printf("distance=%f\n",*distance);
printf("size=%d\n",size);
size++;
tail++;
return 1;
}
__device__ int gpu2cpuQueue::produce(double distance)
{
if(!(head<size)) return 0;
if(threadIdx.x) return 1;
printf("gpu size=%d\t(head%QUEUE_SIZE)=%d\n",size,(head%QUEUE_SIZE));
q[(head%QUEUE_SIZE)]=distance;
//printf("before\n");
//printf("distance=%f\n",distance);
__threadfence_system();
//printf("after\n");
head++;
__threadfence_system();
return 1;
}
struct QP{
cpu2gpuQueue cpugpu;
gpu2cpuQueue gpucpu;
};
__global__ void test(struct QP* Ptr){
int i;
if(!threadIdx.x) printf("test kernel\n");
__shared__ uchar images[2*SQR(IMG_DIMENSION)];
__shared__ int hist1[HIST_SIZE],hist2[HIST_SIZE];
__shared__ double distance[HIST_SIZE];
//if(threadIdx.x<HIST_SIZE)
//hist1[threadIdx.x]=hist2[threadIdx.x]=0;
//if(!threadIdx.x) printf("test kernel\n");
for(int i=0;i<NREQUESTS;i++)
{
//if(!threadIdx.x)printf("gpu loop i=%d\n",i);
while(!Ptr->cpugpu.consume(images));
if(!threadIdx.x)printf("gpu loop i=%d\n",i);
//gpu_image_to_histogram(images,hist1);
//gpu_image_to_histogram(images+SQR(IMG_DIMENSION),hist2);
//__syncthreads();
//gpu_histogram_distance(hist1,hist2,distance);
//while(!Ptr->gpucpu.produce(distance[0]));
while(!Ptr->gpucpu.produce((double)i));
__syncthreads();
}
}
__global__ void test1(int* Ptr)
{
int i;
for(int i=0;i<NREQUESTS;++i)
Ptr[i]=i;
printf("kernel finish\n");
return;
}
int main(void) {
uchar *images1;
uchar *images2;
CUDA_CHECK( cudaHostAlloc(&images1, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
CUDA_CHECK( cudaHostAlloc(&images2, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
load_image_pairs(images1, images2);
double t_start, t_finish;
double total_distance=0,distance=0;
int i=NREQUESTS,finished=0;
struct QP *cpuqp,*gpuqp;
CUDA_CHECK( cudaHostAlloc(&cpuqp, sizeof(struct QP), cudaHostAllocWriteCombined) );
cpuqp->cpugpu=cpu2gpuQueue();
cpuqp->gpucpu=gpu2cpuQueue();
CUDA_CHECK( cudaHostGetDevicePointer(&gpuqp,cpuqp,0) );
printf("\n=== CPU ===\n");
int histogram1[256];
int histogram2[256];
t_start = get_time_msec();
for (int i = 0; i < NREQUESTS; i++) {
int img_idx = i % N_IMG_PAIRS;
image_to_histogram(&images1[img_idx * IMG_DIMENSION * IMG_DIMENSION], histogram1);
image_to_histogram(&images2[img_idx * IMG_DIMENSION * IMG_DIMENSION], histogram2);
total_distance += histogram_distance(histogram1, histogram2);
}
t_finish = get_time_msec();
printf("average distance between images %f\n", total_distance / NREQUESTS);
printf("throughput = %lf (req/sec)\n", NREQUESTS / (t_finish - t_start) * 1e+3);
total_distance=0;
test<<<1, 1024>>>(gpuqp);
cpuqp->gpucpu.testadd();
//printf("after\n");
for(int i=0;i<NREQUESTS;i++)
{
printf("cpu loop i=%d\n",i);
distance=0;
if(cpuqp->gpucpu.consume(&distance))
{
//printf("distance=%f\n",distance);
total_distance+=distance;
finished++;
printf("finished=%d\n",finished);
}
int img_idx = i % N_IMG_PAIRS,j;
//for(j=0;j<SQR(IMG_DIMENSION);++j)printf("%d%d",images1[img_idx * IMG_DIMENSION * IMG_DIMENSION+j],images2[img_idx * IMG_DIMENSION * IMG_DIMENSION+j]);
//printf("\n");
while(!cpuqp->cpugpu.produce(&images1[img_idx * IMG_DIMENSION * IMG_DIMENSION],&images2[img_idx * IMG_DIMENSION * IMG_DIMENSION]));
cpuqp->gpucpu.test();
}
printf("finish loop\n");
/*while(finished<NREQUESTS)
{
if(cpuqp->gpucpu.consume(&distance))
{
//printf("distance=%f\n",distance);
total_distance+=distance;
finished++;
printf("finished=%d\n",finished);
}
}*//*
printf("finish loop 2\n");
CUDA_CHECK( cudaDeviceSynchronize());
cpuqp->gpucpu.test();
printf("average distance between images %f\n", total_distance / NREQUESTS);
return 0;
}*/ | /* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <assert.h>
#include <string.h>
/*
#define IMG_DIMENSION 32
#define N_IMG_PAIRS 10000
#define NREQUESTS 6
#define N_STREMS 64
#define HIST_SIZE 256
typedef unsigned char uchar;
#define OUT
#define CUDA_CHECK(f) do { \
cudaError_t e = f; \
if (e != cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
exit(1); \
} \
} while (0)
#define SQR(a) ((a) * (a))
#define QUEUE_SIZE 10
__device__ __host__ bool is_in_image_bounds(int i, int j) {
return (i >= 0) && (i < IMG_DIMENSION) && (j >= 0) && (j < IMG_DIMENSION);
}
__device__ __host__ uchar local_binary_pattern(uchar *image, int i, int j) {
uchar center = image[i * IMG_DIMENSION + j];
uchar pattern = 0;
if (is_in_image_bounds(i - 1, j - 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j - 1)] >= center) << 7;
if (is_in_image_bounds(i - 1, j )) pattern |= (image[(i - 1) * IMG_DIMENSION + (j )] >= center) << 6;
if (is_in_image_bounds(i - 1, j + 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j + 1)] >= center) << 5;
if (is_in_image_bounds(i , j + 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j + 1)] >= center) << 4;
if (is_in_image_bounds(i + 1, j + 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j + 1)] >= center) << 3;
if (is_in_image_bounds(i + 1, j )) pattern |= (image[(i + 1) * IMG_DIMENSION + (j )] >= center) << 2;
if (is_in_image_bounds(i + 1, j - 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j - 1)] >= center) << 1;
if (is_in_image_bounds(i , j - 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j - 1)] >= center) << 0;
return pattern;
}
__device__ void gpu_image_to_histogram(uchar *image, int *histogram) {
uchar pattern = local_binary_pattern(image, threadIdx.x / IMG_DIMENSION, threadIdx.x % IMG_DIMENSION);
atomicAdd(&histogram[pattern], 1);
}
__device__ void gpu_histogram_distance(int *h1, int *h2, double *distance) {
int length = 256;
int tid = threadIdx.x;
if(tid<length){
distance[tid] = 0;
if (h1[tid] + h2[tid] != 0) {
distance[tid] = ((double)SQR(h1[tid] - h2[tid])) / (h1[tid] + h2[tid]);
}
h1[tid] = h2[tid]=0;
}
__syncthreads();
while (length > 1) {
if (threadIdx.x < length / 2) {
distance[tid] = distance[tid] + distance[tid + length / 2];
}
length /= 2;
__syncthreads();
}
}
void image_to_histogram(uchar *image, int *histogram) {
memset(histogram, 0, sizeof(int) * 256);
for (int i = 0; i < IMG_DIMENSION; i++) {
for (int j = 0; j < IMG_DIMENSION; j++) {
uchar pattern = local_binary_pattern(image, i, j);
histogram[pattern]++;
}
}
}
double histogram_distance(int *h1, int *h2) {
/* we'll use the chi-square distance *//*
double distance = 0;
for (int i = 0; i < 256; i++) {
if (h1[i] + h2[i] != 0) {
distance += ((double)SQR(h1[i] - h2[i])) / (h1[i] + h2[i]);
}
}
return distance;
}
double static inline get_time_msec(void) {
struct timeval t;
gettimeofday(&t, NULL);
return t.tv_sec * 1e+3 + t.tv_usec * 1e-3;
}
/* we won't load actual files. just fill the images with random bytes *//*
void load_image_pairs(uchar *images1, uchar *images2) {
srand(0);
for (int i = 0; i < N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION; i++) {
images1[i] = rand() % 256;
images2[i] = rand() % 256;
}
}
/*************************************************/
/*******CLASS***producer***consumer****queue******/
/*************************************************//*
class cpu2gpuQueue {
public:
cpu2gpuQueue():size(QUEUE_SIZE),head(0),tail(0){/*printf("head=%d\tsize=%d\n",head,size)*//*;}
~cpu2gpuQueue(){}
__device__ __host__ cpu2gpuQueue& operator=(const cpu2gpuQueue& rhs);
__host__ int produce(uchar* imag1,uchar* imag2);
__device__ int consume(uchar* images);
private:
volatile int size;
volatile int head;
volatile int tail;
uchar q[QUEUE_SIZE*SQR(IMG_DIMENSION)];
};
__device__ __host__ cpu2gpuQueue& cpu2gpuQueue::operator=(const cpu2gpuQueue& rhs)
{
this->head=rhs.head;
this->size=rhs.size;
this->tail=rhs.tail;
memcpy(this->q,rhs.q,QUEUE_SIZE*SQR(IMG_DIMENSION)*sizeof(*rhs.q));
return *this;
}
__device__ int cpu2gpuQueue::consume(uchar* images)
{
if(!threadIdx.x)
{
printf("cpu2gpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
}
if(!(tail<head))return 0;
/*int i;
for(i=threadIdx.x;i<2*SQR(IMG_DIMENSION);i+=gridDim.x)
images[i]=q[(tail%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)+i];*//*
//make sure all threads copied before increasing the value of tail
__syncthreads();
if(!threadIdx.x)
{
size++;
tail++;
printf("gpu size=%d\n",size);
__threadfence_system();
}
// __syncthreads();
return 1;
}
__host__ int cpu2gpuQueue::produce(uchar* imag1,uchar* imag2)
{
if(!(head<size)){
//printf("head=%d\tsize=%d\ttrue\n",head,size);
return 0;
}
memcpy(&q[(head%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)],imag1,SQR(IMG_DIMENSION)*sizeof(uchar));
memcpy(&q[(head%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)+SQR(IMG_DIMENSION)],imag2,SQR(IMG_DIMENSION)*sizeof(uchar));
head++;
return 1;
}
class gpu2cpuQueue {
public:
gpu2cpuQueue():size(QUEUE_SIZE),head(0),tail(0){}
~gpu2cpuQueue(){}
__device__ __host__ gpu2cpuQueue& operator=(const gpu2cpuQueue& rhs);
__device__ int produce(double distance);
__host__ int consume(double* distance);
void testadd(){size++;}
void test(){
for(int i=0;i<QUEUE_SIZE;i++)
printf("%f\t",q[i]);
printf("\n");
}
private:
volatile int size;
volatile int head;
volatile int tail;
double q[QUEUE_SIZE];
};
__device__ __host__ gpu2cpuQueue& gpu2cpuQueue::operator=(const gpu2cpuQueue& rhs)
{
this->head=rhs.head;
this->size=rhs.size;
this->tail=rhs.tail;
memcpy(this->q,rhs.q,QUEUE_SIZE*sizeof(*rhs.q));
return *this;
}
static int x=0;
__host__ int gpu2cpuQueue::consume(double* distance)
{
if((!(x%100))&&x<500){
printf("gpu2cpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
}
x++;
if(!(tail<head))return 0;
*distance=q[(tail%QUEUE_SIZE)];
printf("gpu2cpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
printf("distance=%f\n",*distance);
printf("size=%d\n",size);
size++;
tail++;
return 1;
}
__device__ int gpu2cpuQueue::produce(double distance)
{
if(!(head<size)) return 0;
if(threadIdx.x) return 1;
printf("gpu size=%d\t(head%QUEUE_SIZE)=%d\n",size,(head%QUEUE_SIZE));
q[(head%QUEUE_SIZE)]=distance;
//printf("before\n");
//printf("distance=%f\n",distance);
__threadfence_system();
//printf("after\n");
head++;
__threadfence_system();
return 1;
}
struct QP{
cpu2gpuQueue cpugpu;
gpu2cpuQueue gpucpu;
};
__global__ void test(struct QP* Ptr){
int i;
if(!threadIdx.x) printf("test kernel\n");
__shared__ uchar images[2*SQR(IMG_DIMENSION)];
__shared__ int hist1[HIST_SIZE],hist2[HIST_SIZE];
__shared__ double distance[HIST_SIZE];
//if(threadIdx.x<HIST_SIZE)
//hist1[threadIdx.x]=hist2[threadIdx.x]=0;
//if(!threadIdx.x) printf("test kernel\n");
for(int i=0;i<NREQUESTS;i++)
{
//if(!threadIdx.x)printf("gpu loop i=%d\n",i);
while(!Ptr->cpugpu.consume(images));
if(!threadIdx.x)printf("gpu loop i=%d\n",i);
//gpu_image_to_histogram(images,hist1);
//gpu_image_to_histogram(images+SQR(IMG_DIMENSION),hist2);
//__syncthreads();
//gpu_histogram_distance(hist1,hist2,distance);
//while(!Ptr->gpucpu.produce(distance[0]));
while(!Ptr->gpucpu.produce((double)i));
__syncthreads();
}
}
__global__ void test1(int* Ptr)
{
int i;
for(int i=0;i<NREQUESTS;++i)
Ptr[i]=i;
printf("kernel finish\n");
return;
}
int main(void) {
uchar *images1;
uchar *images2;
CUDA_CHECK( cudaHostAlloc(&images1, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
CUDA_CHECK( cudaHostAlloc(&images2, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
load_image_pairs(images1, images2);
double t_start, t_finish;
double total_distance=0,distance=0;
int i=NREQUESTS,finished=0;
struct QP *cpuqp,*gpuqp;
CUDA_CHECK( cudaHostAlloc(&cpuqp, sizeof(struct QP), cudaHostAllocWriteCombined) );
cpuqp->cpugpu=cpu2gpuQueue();
cpuqp->gpucpu=gpu2cpuQueue();
CUDA_CHECK( cudaHostGetDevicePointer(&gpuqp,cpuqp,0) );
printf("\n=== CPU ===\n");
int histogram1[256];
int histogram2[256];
t_start = get_time_msec();
for (int i = 0; i < NREQUESTS; i++) {
int img_idx = i % N_IMG_PAIRS;
image_to_histogram(&images1[img_idx * IMG_DIMENSION * IMG_DIMENSION], histogram1);
image_to_histogram(&images2[img_idx * IMG_DIMENSION * IMG_DIMENSION], histogram2);
total_distance += histogram_distance(histogram1, histogram2);
}
t_finish = get_time_msec();
printf("average distance between images %f\n", total_distance / NREQUESTS);
printf("throughput = %lf (req/sec)\n", NREQUESTS / (t_finish - t_start) * 1e+3);
total_distance=0;
test<<<1, 1024>>>(gpuqp);
cpuqp->gpucpu.testadd();
//printf("after\n");
for(int i=0;i<NREQUESTS;i++)
{
printf("cpu loop i=%d\n",i);
distance=0;
if(cpuqp->gpucpu.consume(&distance))
{
//printf("distance=%f\n",distance);
total_distance+=distance;
finished++;
printf("finished=%d\n",finished);
}
int img_idx = i % N_IMG_PAIRS,j;
//for(j=0;j<SQR(IMG_DIMENSION);++j)printf("%d%d",images1[img_idx * IMG_DIMENSION * IMG_DIMENSION+j],images2[img_idx * IMG_DIMENSION * IMG_DIMENSION+j]);
//printf("\n");
while(!cpuqp->cpugpu.produce(&images1[img_idx * IMG_DIMENSION * IMG_DIMENSION],&images2[img_idx * IMG_DIMENSION * IMG_DIMENSION]));
cpuqp->gpucpu.test();
}
printf("finish loop\n");
/*while(finished<NREQUESTS)
{
if(cpuqp->gpucpu.consume(&distance))
{
//printf("distance=%f\n",distance);
total_distance+=distance;
finished++;
printf("finished=%d\n",finished);
}
}*//*
printf("finish loop 2\n");
CUDA_CHECK( cudaDeviceSynchronize());
cpuqp->gpucpu.test();
printf("average distance between images %f\n", total_distance / NREQUESTS);
return 0;
}*/ |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <assert.h>
#include <string.h>
/*
#define IMG_DIMENSION 32
#define N_IMG_PAIRS 10000
#define NREQUESTS 6
#define N_STREMS 64
#define HIST_SIZE 256
typedef unsigned char uchar;
#define OUT
#define CUDA_CHECK(f) do { \
cudaError_t e = f; \
if (e != cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
exit(1); \
} \
} while (0)
#define SQR(a) ((a) * (a))
#define QUEUE_SIZE 10
__device__ __host__ bool is_in_image_bounds(int i, int j) {
return (i >= 0) && (i < IMG_DIMENSION) && (j >= 0) && (j < IMG_DIMENSION);
}
__device__ __host__ uchar local_binary_pattern(uchar *image, int i, int j) {
uchar center = image[i * IMG_DIMENSION + j];
uchar pattern = 0;
if (is_in_image_bounds(i - 1, j - 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j - 1)] >= center) << 7;
if (is_in_image_bounds(i - 1, j )) pattern |= (image[(i - 1) * IMG_DIMENSION + (j )] >= center) << 6;
if (is_in_image_bounds(i - 1, j + 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j + 1)] >= center) << 5;
if (is_in_image_bounds(i , j + 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j + 1)] >= center) << 4;
if (is_in_image_bounds(i + 1, j + 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j + 1)] >= center) << 3;
if (is_in_image_bounds(i + 1, j )) pattern |= (image[(i + 1) * IMG_DIMENSION + (j )] >= center) << 2;
if (is_in_image_bounds(i + 1, j - 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j - 1)] >= center) << 1;
if (is_in_image_bounds(i , j - 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j - 1)] >= center) << 0;
return pattern;
}
__device__ void gpu_image_to_histogram(uchar *image, int *histogram) {
uchar pattern = local_binary_pattern(image, threadIdx.x / IMG_DIMENSION, threadIdx.x % IMG_DIMENSION);
atomicAdd(&histogram[pattern], 1);
}
__device__ void gpu_histogram_distance(int *h1, int *h2, double *distance) {
int length = 256;
int tid = threadIdx.x;
if(tid<length){
distance[tid] = 0;
if (h1[tid] + h2[tid] != 0) {
distance[tid] = ((double)SQR(h1[tid] - h2[tid])) / (h1[tid] + h2[tid]);
}
h1[tid] = h2[tid]=0;
}
__syncthreads();
while (length > 1) {
if (threadIdx.x < length / 2) {
distance[tid] = distance[tid] + distance[tid + length / 2];
}
length /= 2;
__syncthreads();
}
}
void image_to_histogram(uchar *image, int *histogram) {
memset(histogram, 0, sizeof(int) * 256);
for (int i = 0; i < IMG_DIMENSION; i++) {
for (int j = 0; j < IMG_DIMENSION; j++) {
uchar pattern = local_binary_pattern(image, i, j);
histogram[pattern]++;
}
}
}
double histogram_distance(int *h1, int *h2) {
/* we'll use the chi-square distance *//*
double distance = 0;
for (int i = 0; i < 256; i++) {
if (h1[i] + h2[i] != 0) {
distance += ((double)SQR(h1[i] - h2[i])) / (h1[i] + h2[i]);
}
}
return distance;
}
double static inline get_time_msec(void) {
struct timeval t;
gettimeofday(&t, NULL);
return t.tv_sec * 1e+3 + t.tv_usec * 1e-3;
}
/* we won't load actual files. just fill the images with random bytes *//*
void load_image_pairs(uchar *images1, uchar *images2) {
srand(0);
for (int i = 0; i < N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION; i++) {
images1[i] = rand() % 256;
images2[i] = rand() % 256;
}
}
/*************************************************/
/*******CLASS***producer***consumer****queue******/
/*************************************************//*
class cpu2gpuQueue {
public:
cpu2gpuQueue():size(QUEUE_SIZE),head(0),tail(0){/*printf("head=%d\tsize=%d\n",head,size)*//*;}
~cpu2gpuQueue(){}
__device__ __host__ cpu2gpuQueue& operator=(const cpu2gpuQueue& rhs);
__host__ int produce(uchar* imag1,uchar* imag2);
__device__ int consume(uchar* images);
private:
volatile int size;
volatile int head;
volatile int tail;
uchar q[QUEUE_SIZE*SQR(IMG_DIMENSION)];
};
__device__ __host__ cpu2gpuQueue& cpu2gpuQueue::operator=(const cpu2gpuQueue& rhs)
{
this->head=rhs.head;
this->size=rhs.size;
this->tail=rhs.tail;
memcpy(this->q,rhs.q,QUEUE_SIZE*SQR(IMG_DIMENSION)*sizeof(*rhs.q));
return *this;
}
__device__ int cpu2gpuQueue::consume(uchar* images)
{
if(!threadIdx.x)
{
printf("cpu2gpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
}
if(!(tail<head))return 0;
/*int i;
for(i=threadIdx.x;i<2*SQR(IMG_DIMENSION);i+=gridDim.x)
images[i]=q[(tail%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)+i];*//*
//make sure all threads copied before increasing the value of tail
__syncthreads();
if(!threadIdx.x)
{
size++;
tail++;
printf("gpu size=%d\n",size);
__threadfence_system();
}
// __syncthreads();
return 1;
}
__host__ int cpu2gpuQueue::produce(uchar* imag1,uchar* imag2)
{
if(!(head<size)){
//printf("head=%d\tsize=%d\ttrue\n",head,size);
return 0;
}
memcpy(&q[(head%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)],imag1,SQR(IMG_DIMENSION)*sizeof(uchar));
memcpy(&q[(head%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)+SQR(IMG_DIMENSION)],imag2,SQR(IMG_DIMENSION)*sizeof(uchar));
head++;
return 1;
}
class gpu2cpuQueue {
public:
gpu2cpuQueue():size(QUEUE_SIZE),head(0),tail(0){}
~gpu2cpuQueue(){}
__device__ __host__ gpu2cpuQueue& operator=(const gpu2cpuQueue& rhs);
__device__ int produce(double distance);
__host__ int consume(double* distance);
void testadd(){size++;}
void test(){
for(int i=0;i<QUEUE_SIZE;i++)
printf("%f\t",q[i]);
printf("\n");
}
private:
volatile int size;
volatile int head;
volatile int tail;
double q[QUEUE_SIZE];
};
__device__ __host__ gpu2cpuQueue& gpu2cpuQueue::operator=(const gpu2cpuQueue& rhs)
{
this->head=rhs.head;
this->size=rhs.size;
this->tail=rhs.tail;
memcpy(this->q,rhs.q,QUEUE_SIZE*sizeof(*rhs.q));
return *this;
}
static int x=0;
__host__ int gpu2cpuQueue::consume(double* distance)
{
if((!(x%100))&&x<500){
printf("gpu2cpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
}
x++;
if(!(tail<head))return 0;
*distance=q[(tail%QUEUE_SIZE)];
printf("gpu2cpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
printf("distance=%f\n",*distance);
printf("size=%d\n",size);
size++;
tail++;
return 1;
}
__device__ int gpu2cpuQueue::produce(double distance)
{
if(!(head<size)) return 0;
if(threadIdx.x) return 1;
printf("gpu size=%d\t(head%QUEUE_SIZE)=%d\n",size,(head%QUEUE_SIZE));
q[(head%QUEUE_SIZE)]=distance;
//printf("before\n");
//printf("distance=%f\n",distance);
__threadfence_system();
//printf("after\n");
head++;
__threadfence_system();
return 1;
}
struct QP{
cpu2gpuQueue cpugpu;
gpu2cpuQueue gpucpu;
};
__global__ void test(struct QP* Ptr){
int i;
if(!threadIdx.x) printf("test kernel\n");
__shared__ uchar images[2*SQR(IMG_DIMENSION)];
__shared__ int hist1[HIST_SIZE],hist2[HIST_SIZE];
__shared__ double distance[HIST_SIZE];
//if(threadIdx.x<HIST_SIZE)
//hist1[threadIdx.x]=hist2[threadIdx.x]=0;
//if(!threadIdx.x) printf("test kernel\n");
for(int i=0;i<NREQUESTS;i++)
{
//if(!threadIdx.x)printf("gpu loop i=%d\n",i);
while(!Ptr->cpugpu.consume(images));
if(!threadIdx.x)printf("gpu loop i=%d\n",i);
//gpu_image_to_histogram(images,hist1);
//gpu_image_to_histogram(images+SQR(IMG_DIMENSION),hist2);
//__syncthreads();
//gpu_histogram_distance(hist1,hist2,distance);
//while(!Ptr->gpucpu.produce(distance[0]));
while(!Ptr->gpucpu.produce((double)i));
__syncthreads();
}
}
__global__ void test1(int* Ptr)
{
int i;
for(int i=0;i<NREQUESTS;++i)
Ptr[i]=i;
printf("kernel finish\n");
return;
}
int main(void) {
uchar *images1;
uchar *images2;
CUDA_CHECK( cudaHostAlloc(&images1, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
CUDA_CHECK( cudaHostAlloc(&images2, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
load_image_pairs(images1, images2);
double t_start, t_finish;
double total_distance=0,distance=0;
int i=NREQUESTS,finished=0;
struct QP *cpuqp,*gpuqp;
CUDA_CHECK( cudaHostAlloc(&cpuqp, sizeof(struct QP), cudaHostAllocWriteCombined) );
cpuqp->cpugpu=cpu2gpuQueue();
cpuqp->gpucpu=gpu2cpuQueue();
CUDA_CHECK( cudaHostGetDevicePointer(&gpuqp,cpuqp,0) );
printf("\n=== CPU ===\n");
int histogram1[256];
int histogram2[256];
t_start = get_time_msec();
for (int i = 0; i < NREQUESTS; i++) {
int img_idx = i % N_IMG_PAIRS;
image_to_histogram(&images1[img_idx * IMG_DIMENSION * IMG_DIMENSION], histogram1);
image_to_histogram(&images2[img_idx * IMG_DIMENSION * IMG_DIMENSION], histogram2);
total_distance += histogram_distance(histogram1, histogram2);
}
t_finish = get_time_msec();
printf("average distance between images %f\n", total_distance / NREQUESTS);
printf("throughput = %lf (req/sec)\n", NREQUESTS / (t_finish - t_start) * 1e+3);
total_distance=0;
test<<<1, 1024>>>(gpuqp);
cpuqp->gpucpu.testadd();
//printf("after\n");
for(int i=0;i<NREQUESTS;i++)
{
printf("cpu loop i=%d\n",i);
distance=0;
if(cpuqp->gpucpu.consume(&distance))
{
//printf("distance=%f\n",distance);
total_distance+=distance;
finished++;
printf("finished=%d\n",finished);
}
int img_idx = i % N_IMG_PAIRS,j;
//for(j=0;j<SQR(IMG_DIMENSION);++j)printf("%d%d",images1[img_idx * IMG_DIMENSION * IMG_DIMENSION+j],images2[img_idx * IMG_DIMENSION * IMG_DIMENSION+j]);
//printf("\n");
while(!cpuqp->cpugpu.produce(&images1[img_idx * IMG_DIMENSION * IMG_DIMENSION],&images2[img_idx * IMG_DIMENSION * IMG_DIMENSION]));
cpuqp->gpucpu.test();
}
printf("finish loop\n");
/*while(finished<NREQUESTS)
{
if(cpuqp->gpucpu.consume(&distance))
{
//printf("distance=%f\n",distance);
total_distance+=distance;
finished++;
printf("finished=%d\n",finished);
}
}*//*
printf("finish loop 2\n");
CUDA_CHECK( cudaDeviceSynchronize());
cpuqp->gpucpu.test();
printf("average distance between images %f\n", total_distance / NREQUESTS);
return 0;
}*/ | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <assert.h>
#include <string.h>
/*
#define IMG_DIMENSION 32
#define N_IMG_PAIRS 10000
#define NREQUESTS 6
#define N_STREMS 64
#define HIST_SIZE 256
typedef unsigned char uchar;
#define OUT
#define CUDA_CHECK(f) do { \
cudaError_t e = f; \
if (e != cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
exit(1); \
} \
} while (0)
#define SQR(a) ((a) * (a))
#define QUEUE_SIZE 10
__device__ __host__ bool is_in_image_bounds(int i, int j) {
return (i >= 0) && (i < IMG_DIMENSION) && (j >= 0) && (j < IMG_DIMENSION);
}
__device__ __host__ uchar local_binary_pattern(uchar *image, int i, int j) {
uchar center = image[i * IMG_DIMENSION + j];
uchar pattern = 0;
if (is_in_image_bounds(i - 1, j - 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j - 1)] >= center) << 7;
if (is_in_image_bounds(i - 1, j )) pattern |= (image[(i - 1) * IMG_DIMENSION + (j )] >= center) << 6;
if (is_in_image_bounds(i - 1, j + 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j + 1)] >= center) << 5;
if (is_in_image_bounds(i , j + 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j + 1)] >= center) << 4;
if (is_in_image_bounds(i + 1, j + 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j + 1)] >= center) << 3;
if (is_in_image_bounds(i + 1, j )) pattern |= (image[(i + 1) * IMG_DIMENSION + (j )] >= center) << 2;
if (is_in_image_bounds(i + 1, j - 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j - 1)] >= center) << 1;
if (is_in_image_bounds(i , j - 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j - 1)] >= center) << 0;
return pattern;
}
__device__ void gpu_image_to_histogram(uchar *image, int *histogram) {
uchar pattern = local_binary_pattern(image, threadIdx.x / IMG_DIMENSION, threadIdx.x % IMG_DIMENSION);
atomicAdd(&histogram[pattern], 1);
}
__device__ void gpu_histogram_distance(int *h1, int *h2, double *distance) {
int length = 256;
int tid = threadIdx.x;
if(tid<length){
distance[tid] = 0;
if (h1[tid] + h2[tid] != 0) {
distance[tid] = ((double)SQR(h1[tid] - h2[tid])) / (h1[tid] + h2[tid]);
}
h1[tid] = h2[tid]=0;
}
__syncthreads();
while (length > 1) {
if (threadIdx.x < length / 2) {
distance[tid] = distance[tid] + distance[tid + length / 2];
}
length /= 2;
__syncthreads();
}
}
void image_to_histogram(uchar *image, int *histogram) {
memset(histogram, 0, sizeof(int) * 256);
for (int i = 0; i < IMG_DIMENSION; i++) {
for (int j = 0; j < IMG_DIMENSION; j++) {
uchar pattern = local_binary_pattern(image, i, j);
histogram[pattern]++;
}
}
}
double histogram_distance(int *h1, int *h2) {
/* we'll use the chi-square distance *//*
double distance = 0;
for (int i = 0; i < 256; i++) {
if (h1[i] + h2[i] != 0) {
distance += ((double)SQR(h1[i] - h2[i])) / (h1[i] + h2[i]);
}
}
return distance;
}
double static inline get_time_msec(void) {
struct timeval t;
gettimeofday(&t, NULL);
return t.tv_sec * 1e+3 + t.tv_usec * 1e-3;
}
/* we won't load actual files. just fill the images with random bytes *//*
void load_image_pairs(uchar *images1, uchar *images2) {
srand(0);
for (int i = 0; i < N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION; i++) {
images1[i] = rand() % 256;
images2[i] = rand() % 256;
}
}
/*************************************************/
/*******CLASS***producer***consumer****queue******/
/*************************************************//*
class cpu2gpuQueue {
public:
cpu2gpuQueue():size(QUEUE_SIZE),head(0),tail(0){/*printf("head=%d\tsize=%d\n",head,size)*//*;}
~cpu2gpuQueue(){}
__device__ __host__ cpu2gpuQueue& operator=(const cpu2gpuQueue& rhs);
__host__ int produce(uchar* imag1,uchar* imag2);
__device__ int consume(uchar* images);
private:
volatile int size;
volatile int head;
volatile int tail;
uchar q[QUEUE_SIZE*SQR(IMG_DIMENSION)];
};
__device__ __host__ cpu2gpuQueue& cpu2gpuQueue::operator=(const cpu2gpuQueue& rhs)
{
this->head=rhs.head;
this->size=rhs.size;
this->tail=rhs.tail;
memcpy(this->q,rhs.q,QUEUE_SIZE*SQR(IMG_DIMENSION)*sizeof(*rhs.q));
return *this;
}
__device__ int cpu2gpuQueue::consume(uchar* images)
{
if(!threadIdx.x)
{
printf("cpu2gpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
}
if(!(tail<head))return 0;
/*int i;
for(i=threadIdx.x;i<2*SQR(IMG_DIMENSION);i+=gridDim.x)
images[i]=q[(tail%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)+i];*//*
//make sure all threads copied before increasing the value of tail
__syncthreads();
if(!threadIdx.x)
{
size++;
tail++;
printf("gpu size=%d\n",size);
__threadfence_system();
}
// __syncthreads();
return 1;
}
__host__ int cpu2gpuQueue::produce(uchar* imag1,uchar* imag2)
{
if(!(head<size)){
//printf("head=%d\tsize=%d\ttrue\n",head,size);
return 0;
}
memcpy(&q[(head%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)],imag1,SQR(IMG_DIMENSION)*sizeof(uchar));
memcpy(&q[(head%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)+SQR(IMG_DIMENSION)],imag2,SQR(IMG_DIMENSION)*sizeof(uchar));
head++;
return 1;
}
class gpu2cpuQueue {
public:
gpu2cpuQueue():size(QUEUE_SIZE),head(0),tail(0){}
~gpu2cpuQueue(){}
__device__ __host__ gpu2cpuQueue& operator=(const gpu2cpuQueue& rhs);
__device__ int produce(double distance);
__host__ int consume(double* distance);
void testadd(){size++;}
void test(){
for(int i=0;i<QUEUE_SIZE;i++)
printf("%f\t",q[i]);
printf("\n");
}
private:
volatile int size;
volatile int head;
volatile int tail;
double q[QUEUE_SIZE];
};
__device__ __host__ gpu2cpuQueue& gpu2cpuQueue::operator=(const gpu2cpuQueue& rhs)
{
this->head=rhs.head;
this->size=rhs.size;
this->tail=rhs.tail;
memcpy(this->q,rhs.q,QUEUE_SIZE*sizeof(*rhs.q));
return *this;
}
static int x=0;
__host__ int gpu2cpuQueue::consume(double* distance)
{
if((!(x%100))&&x<500){
printf("gpu2cpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
}
x++;
if(!(tail<head))return 0;
*distance=q[(tail%QUEUE_SIZE)];
printf("gpu2cpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
printf("distance=%f\n",*distance);
printf("size=%d\n",size);
size++;
tail++;
return 1;
}
__device__ int gpu2cpuQueue::produce(double distance)
{
if(!(head<size)) return 0;
if(threadIdx.x) return 1;
printf("gpu size=%d\t(head%QUEUE_SIZE)=%d\n",size,(head%QUEUE_SIZE));
q[(head%QUEUE_SIZE)]=distance;
//printf("before\n");
//printf("distance=%f\n",distance);
__threadfence_system();
//printf("after\n");
head++;
__threadfence_system();
return 1;
}
struct QP{
cpu2gpuQueue cpugpu;
gpu2cpuQueue gpucpu;
};
__global__ void test(struct QP* Ptr){
int i;
if(!threadIdx.x) printf("test kernel\n");
__shared__ uchar images[2*SQR(IMG_DIMENSION)];
__shared__ int hist1[HIST_SIZE],hist2[HIST_SIZE];
__shared__ double distance[HIST_SIZE];
//if(threadIdx.x<HIST_SIZE)
//hist1[threadIdx.x]=hist2[threadIdx.x]=0;
//if(!threadIdx.x) printf("test kernel\n");
for(int i=0;i<NREQUESTS;i++)
{
//if(!threadIdx.x)printf("gpu loop i=%d\n",i);
while(!Ptr->cpugpu.consume(images));
if(!threadIdx.x)printf("gpu loop i=%d\n",i);
//gpu_image_to_histogram(images,hist1);
//gpu_image_to_histogram(images+SQR(IMG_DIMENSION),hist2);
//__syncthreads();
//gpu_histogram_distance(hist1,hist2,distance);
//while(!Ptr->gpucpu.produce(distance[0]));
while(!Ptr->gpucpu.produce((double)i));
__syncthreads();
}
}
__global__ void test1(int* Ptr)
{
int i;
for(int i=0;i<NREQUESTS;++i)
Ptr[i]=i;
printf("kernel finish\n");
return;
}
int main(void) {
uchar *images1;
uchar *images2;
CUDA_CHECK( cudaHostAlloc(&images1, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
CUDA_CHECK( cudaHostAlloc(&images2, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
load_image_pairs(images1, images2);
double t_start, t_finish;
double total_distance=0,distance=0;
int i=NREQUESTS,finished=0;
struct QP *cpuqp,*gpuqp;
CUDA_CHECK( cudaHostAlloc(&cpuqp, sizeof(struct QP), cudaHostAllocWriteCombined) );
cpuqp->cpugpu=cpu2gpuQueue();
cpuqp->gpucpu=gpu2cpuQueue();
CUDA_CHECK( cudaHostGetDevicePointer(&gpuqp,cpuqp,0) );
printf("\n=== CPU ===\n");
int histogram1[256];
int histogram2[256];
t_start = get_time_msec();
for (int i = 0; i < NREQUESTS; i++) {
int img_idx = i % N_IMG_PAIRS;
image_to_histogram(&images1[img_idx * IMG_DIMENSION * IMG_DIMENSION], histogram1);
image_to_histogram(&images2[img_idx * IMG_DIMENSION * IMG_DIMENSION], histogram2);
total_distance += histogram_distance(histogram1, histogram2);
}
t_finish = get_time_msec();
printf("average distance between images %f\n", total_distance / NREQUESTS);
printf("throughput = %lf (req/sec)\n", NREQUESTS / (t_finish - t_start) * 1e+3);
total_distance=0;
test<<<1, 1024>>>(gpuqp);
cpuqp->gpucpu.testadd();
//printf("after\n");
for(int i=0;i<NREQUESTS;i++)
{
printf("cpu loop i=%d\n",i);
distance=0;
if(cpuqp->gpucpu.consume(&distance))
{
//printf("distance=%f\n",distance);
total_distance+=distance;
finished++;
printf("finished=%d\n",finished);
}
int img_idx = i % N_IMG_PAIRS,j;
//for(j=0;j<SQR(IMG_DIMENSION);++j)printf("%d%d",images1[img_idx * IMG_DIMENSION * IMG_DIMENSION+j],images2[img_idx * IMG_DIMENSION * IMG_DIMENSION+j]);
//printf("\n");
while(!cpuqp->cpugpu.produce(&images1[img_idx * IMG_DIMENSION * IMG_DIMENSION],&images2[img_idx * IMG_DIMENSION * IMG_DIMENSION]));
cpuqp->gpucpu.test();
}
printf("finish loop\n");
/*while(finished<NREQUESTS)
{
if(cpuqp->gpucpu.consume(&distance))
{
//printf("distance=%f\n",distance);
total_distance+=distance;
finished++;
printf("finished=%d\n",finished);
}
}*//*
printf("finish loop 2\n");
CUDA_CHECK( cudaDeviceSynchronize());
cpuqp->gpucpu.test();
printf("average distance between images %f\n", total_distance / NREQUESTS);
return 0;
}*/ | .text
.file "cpugpuqueue.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0012321c_00000000-6_cpugpuqueue.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2072:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2072:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2095:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2095:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cpugpuqueue.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
* How to compile (assume cuda is installed at /usr/local/cuda/)
* nvcc add.cu
* ./a.out
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda_runtime.h>
// Adds the two ints pointed to by a and b and stores the sum through c.
// All three pointers must be device pointers. Launched as <<<1, 1>>> in
// main below — a single thread computes one scalar sum, so no thread
// indexing or bounds check is needed.
__global__ void add_kernel(int* a, int* b, int*c){
*c = *a + *b;
}
// Minimal CUDA "hello world": copies two host ints to the device, adds them
// in a single-thread kernel, times the launch with CUDA events, and copies
// the result (expected: 30) back to the host.
// NOTE(review): every CUDA API return code here is ignored — cudaMalloc /
// cudaMemcpy / event-call failures go unnoticed, and there is no
// cudaGetLastError() after the kernel launch. Fine for a demo, not for
// production code.
int main(void)
{
printf("My First CUDA Application\n");
int a, b, c;
int *d_a, *d_b, *d_c;
a = 10; b=20; c=0;
int size = sizeof(int);
// Allocate space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Copy inputs to device
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Launch add() kernel on GPU
add_kernel<<<1, 1>>>(d_a, d_b, d_c);
// Block until the kernel finishes before recording the stop event.
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// Elapsed time between the two events, in milliseconds.
cudaEventElapsedTime(&time, start, stop);
printf("Time to generate: %3.1f ms \n", time);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Copy result back to host
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
printf("Result is: %d\n", c);
// Cleanup
if(d_a) cudaFree(d_a);
if(d_b) cudaFree(d_b);
if(d_c) cudaFree(d_c);
return 0;
}
Function : _Z10add_kernelPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff027624 */
/* 0x000fe200078e00ff */
/*0020*/ MOV R5, c[0x0][0x16c] ; /* 0x00005b0000057a02 */
/* 0x000fe20000000f00 */
/*0030*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff047624 */
/* 0x000fe200078e00ff */
/*0040*/ MOV R3, c[0x0][0x164] ; /* 0x0000590000037a02 */
/* 0x000fe20000000f00 */
/*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0060*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff067624 */
/* 0x000fe200078e00ff */
/*0090*/ MOV R7, c[0x0][0x174] ; /* 0x00005d0000077a02 */
/* 0x000fe40000000f00 */
/*00a0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00b0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
* How to compile (assume cuda is installed at /usr/local/cuda/)
* nvcc add.cu
* ./a.out
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda_runtime.h>
// Adds the two ints pointed to by a and b and stores the sum through c.
// All three pointers must be device pointers. Launched as <<<1, 1>>> in
// main below — a single thread computes one scalar sum, so no thread
// indexing or bounds check is needed.
__global__ void add_kernel(int* a, int* b, int*c){
*c = *a + *b;
}
// Minimal CUDA "hello world": copies two host ints to the device, adds them
// in a single-thread kernel, times the launch with CUDA events, and copies
// the result (expected: 30) back to the host.
// NOTE(review): every CUDA API return code here is ignored — cudaMalloc /
// cudaMemcpy / event-call failures go unnoticed, and there is no
// cudaGetLastError() after the kernel launch. Fine for a demo, not for
// production code.
int main(void)
{
printf("My First CUDA Application\n");
int a, b, c;
int *d_a, *d_b, *d_c;
a = 10; b=20; c=0;
int size = sizeof(int);
// Allocate space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Copy inputs to device
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Launch add() kernel on GPU
add_kernel<<<1, 1>>>(d_a, d_b, d_c);
// Block until the kernel finishes before recording the stop event.
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// Elapsed time between the two events, in milliseconds.
cudaEventElapsedTime(&time, start, stop);
printf("Time to generate: %3.1f ms \n", time);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Copy result back to host
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
printf("Result is: %d\n", c);
// Cleanup
if(d_a) cudaFree(d_a);
if(d_b) cudaFree(d_b);
if(d_c) cudaFree(d_c);
return 0;
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z34__device_stub__Z10add_kernelPiS_S_PiS_S_
.type _Z34__device_stub__Z10add_kernelPiS_S_PiS_S_, @function
_Z34__device_stub__Z10add_kernelPiS_S_PiS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10add_kernelPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z34__device_stub__Z10add_kernelPiS_S_PiS_S_, .-_Z34__device_stub__Z10add_kernelPiS_S_PiS_S_
.globl _Z10add_kernelPiS_S_
.type _Z10add_kernelPiS_S_, @function
_Z10add_kernelPiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z34__device_stub__Z10add_kernelPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z10add_kernelPiS_S_, .-_Z10add_kernelPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "My First CUDA Application\n"
.LC1:
.string "Time to generate: %3.1f ms \n"
.LC2:
.string "Result is: %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $10, 12(%rsp)
movl $20, 16(%rsp)
movl $0, 20(%rsp)
leaq 24(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
leaq 12(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
leaq 16(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
leaq 48(%rsp), %rdi
call cudaEventCreate@PLT
leaq 56(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 48(%rsp), %rdi
call cudaEventRecord@PLT
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L12:
call cudaDeviceSynchronize@PLT
movl $0, %esi
movq 56(%rsp), %rdi
call cudaEventRecord@PLT
movq 56(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 76(%rsp), %rdi
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 76(%rsp), %xmm0
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 48(%rsp), %rdi
call cudaEventDestroy@PLT
movq 56(%rsp), %rdi
call cudaEventDestroy@PLT
leaq 20(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 40(%rsp), %rsi
call cudaMemcpy@PLT
movl 20(%rsp), %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 24(%rsp), %rdi
testq %rdi, %rdi
je .L13
call cudaFree@PLT
.L13:
movq 32(%rsp), %rdi
testq %rdi, %rdi
je .L14
call cudaFree@PLT
.L14:
movq 40(%rsp), %rdi
testq %rdi, %rdi
je .L15
call cudaFree@PLT
.L15:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L19
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z34__device_stub__Z10add_kernelPiS_S_PiS_S_
jmp .L12
.L19:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z10add_kernelPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z10add_kernelPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
* How to compile (assume cuda is installed at /usr/local/cuda/)
* nvcc add.cu
* ./a.out
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda_runtime.h>
// Device kernel: reads the two input ints and stores their sum through c.
// This program launches it with a single block of one thread (see main),
// so no thread indexing is required.
__global__ void add_kernel(int* a, int* b, int* c) {
    const int lhs = *a;
    const int rhs = *b;
    *c = lhs + rhs;
}
// Host driver: adds two ints on the GPU, times the kernel with CUDA events,
// and prints the elapsed time followed by the result. Returns 0.
// Behavior note: API return codes are deliberately left unchecked, matching
// the original sample's flow.
int main(void)
{
    printf("My First CUDA Application\n");

    int hostA = 10, hostB = 20, hostC = 0;
    int *devA, *devB, *devC;
    int bytes = sizeof(int);

    // Device-side storage for the two operands and the result.
    cudaMalloc((void **)&devA, bytes);
    cudaMalloc((void **)&devB, bytes);
    cudaMalloc((void **)&devC, bytes);

    // Push the operands to the device.
    cudaMemcpy(devA, &hostA, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devB, &hostB, bytes, cudaMemcpyHostToDevice);

    // Bracket the launch with events to measure elapsed GPU time.
    cudaEvent_t evStart, evStop;
    cudaEventCreate(&evStart);
    cudaEventCreate(&evStop);
    cudaEventRecord(evStart, 0);

    add_kernel<<<1, 1>>>(devA, devB, devC);
    cudaDeviceSynchronize();

    cudaEventRecord(evStop, 0);
    cudaEventSynchronize(evStop);
    float elapsedMs;
    cudaEventElapsedTime(&elapsedMs, evStart, evStop);
    printf("Time to generate: %3.1f ms \n", elapsedMs);
    cudaEventDestroy(evStart);
    cudaEventDestroy(evStop);

    // Pull the result back and report it.
    cudaMemcpy(&hostC, devC, bytes, cudaMemcpyDeviceToHost);
    printf("Result is: %d\n", hostC);

    // Release device allocations (guarded, as in the original flow).
    if (devA) cudaFree(devA);
    if (devB) cudaFree(devB);
    if (devC) cudaFree(devC);
    return 0;
}
* How to compile (assume cuda is installed at /usr/local/cuda/)
* nvcc add.cu
* ./a.out
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
// Device kernel: reads the two input ints and stores their sum through c.
// This program launches it with a single block of one thread (see main),
// so no thread indexing is required.
__global__ void add_kernel(int* a, int* b, int* c) {
    const int lhs = *a;
    const int rhs = *b;
    *c = lhs + rhs;
}
// Host driver: adds two ints on the GPU, times the kernel with HIP events,
// and prints the elapsed time followed by the result. Returns 0.
// Behavior note: API return codes are deliberately left unchecked, matching
// the original sample's flow.
int main(void)
{
    printf("My First CUDA Application\n");

    int hostA = 10, hostB = 20, hostC = 0;
    int *devA, *devB, *devC;
    int bytes = sizeof(int);

    // Device-side storage for the two operands and the result.
    hipMalloc((void **)&devA, bytes);
    hipMalloc((void **)&devB, bytes);
    hipMalloc((void **)&devC, bytes);

    // Push the operands to the device.
    hipMemcpy(devA, &hostA, bytes, hipMemcpyHostToDevice);
    hipMemcpy(devB, &hostB, bytes, hipMemcpyHostToDevice);

    // Bracket the launch with events to measure elapsed GPU time.
    hipEvent_t evStart, evStop;
    hipEventCreate(&evStart);
    hipEventCreate(&evStop);
    hipEventRecord(evStart, 0);

    add_kernel<<<1, 1>>>(devA, devB, devC);
    hipDeviceSynchronize();

    hipEventRecord(evStop, 0);
    hipEventSynchronize(evStop);
    float elapsedMs;
    hipEventElapsedTime(&elapsedMs, evStart, evStop);
    printf("Time to generate: %3.1f ms \n", elapsedMs);
    hipEventDestroy(evStart);
    hipEventDestroy(evStop);

    // Pull the result back and report it.
    hipMemcpy(&hostC, devC, bytes, hipMemcpyDeviceToHost);
    printf("Result is: %d\n", hostC);

    // Release device allocations (guarded, as in the original flow).
    if (devA) hipFree(devA);
    if (devB) hipFree(devB);
    if (devC) hipFree(devC);
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
* How to compile (assume cuda is installed at /usr/local/cuda/)
* nvcc add.cu
* ./a.out
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
// Device kernel: stores *a + *b into *c. Launched below with a single
// block of one thread, so no thread indexing is needed.
__global__ void add_kernel(int* a, int* b, int*c){
    *c = *a + *b;
}
// Host driver: adds two ints on the GPU, times the kernel launch with HIP
// events, and prints the elapsed time and the result.
// NOTE(review): none of the HIP API return codes are checked; failures
// (e.g. no device present) would go unnoticed.
int main(void)
{
    printf("My First CUDA Application\n");
    int a, b, c;            // host copies of the operands and the result
    int *d_a, *d_b, *d_c;   // device copies
    a = 10; b=20; c=0;
    int size = sizeof(int);
    // Allocate space for device copies of a, b, c
    hipMalloc((void **)&d_a, size);
    hipMalloc((void **)&d_b, size);
    hipMalloc((void **)&d_c, size);
    // Copy inputs to device
    hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
    float time;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);   // start timing on the null stream
    // Launch add() kernel on GPU
    add_kernel<<<1, 1>>>(d_a, d_b, d_c);
    hipDeviceSynchronize();
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);  // block until the stop event completes
    hipEventElapsedTime(&time, start, stop);
    printf("Time to generate: %3.1f ms \n", time);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    // Copy result back to host
    hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
    printf("Result is: %d\n", c);
    // Cleanup
    if(d_a) hipFree(d_a);
    if(d_b) hipFree(d_b);
    if(d_c) hipFree(d_c);
    return 0;
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10add_kernelPiS_S_
.globl _Z10add_kernelPiS_S_
.p2align 8
.type _Z10add_kernelPiS_S_,@function
_Z10add_kernelPiS_S_:                         ; gfx1100 code for: *c = *a + *b
	s_clause 0x1
	s_load_b128 s[4:7], s[0:1], 0x0       ; kernarg bytes 0..15: s[4:5]=a, s[6:7]=b
	s_load_b64 s[0:1], s[0:1], 0x10       ; kernarg bytes 16..23: c (reuses kernarg-ptr regs)
	s_waitcnt lgkmcnt(0)
	s_load_b32 s2, s[4:5], 0x0            ; s2 = *a
	s_load_b32 s3, s[6:7], 0x0            ; s3 = *b
	s_waitcnt lgkmcnt(0)
	s_add_i32 s2, s3, s2                  ; s2 = *b + *a
	s_delay_alu instid0(SALU_CYCLE_1)
	v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2   ; v0 = 0 (store offset), v1 = sum
	global_store_b32 v0, v1, s[0:1]       ; *c = sum
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)  ; release VGPRs before program end
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10add_kernelPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10add_kernelPiS_S_, .Lfunc_end0-_Z10add_kernelPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10add_kernelPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z10add_kernelPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
* How to compile (assume cuda is installed at /usr/local/cuda/)
* nvcc add.cu
* ./a.out
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
__global__ void add_kernel(int* a, int* b, int*c){
*c = *a + *b;
}
int main(void)
{
printf("My First CUDA Application\n");
int a, b, c;
int *d_a, *d_b, *d_c;
a = 10; b=20; c=0;
int size = sizeof(int);
// Allocate space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// Copy inputs to device
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
float time;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// Launch add() kernel on GPU
add_kernel<<<1, 1>>>(d_a, d_b, d_c);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("Time to generate: %3.1f ms \n", time);
hipEventDestroy(start);
hipEventDestroy(stop);
// Copy result back to host
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
printf("Result is: %d\n", c);
// Cleanup
if(d_a) hipFree(d_a);
if(d_b) hipFree(d_b);
if(d_c) hipFree(d_c);
return 0;
} | .text
.file "add.hip"
.globl _Z25__device_stub__add_kernelPiS_S_ # -- Begin function _Z25__device_stub__add_kernelPiS_S_
.p2align 4, 0x90
.type _Z25__device_stub__add_kernelPiS_S_,@function
_Z25__device_stub__add_kernelPiS_S_: # @_Z25__device_stub__add_kernelPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10add_kernelPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z25__device_stub__add_kernelPiS_S_, .Lfunc_end0-_Z25__device_stub__add_kernelPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movl $.Lstr, %edi
callq puts@PLT
movl $10, 60(%rsp)
movl $20, 56(%rsp)
movl $0, 12(%rsp)
leaq 48(%rsp), %rdi
movl $4, %esi
callq hipMalloc
leaq 40(%rsp), %rdi
movl $4, %esi
callq hipMalloc
leaq 32(%rsp), %rdi
movl $4, %esi
callq hipMalloc
movq 48(%rsp), %rdi
leaq 60(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movq 40(%rsp), %rdi
leaq 56(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
leaq 24(%rsp), %rdi
callq hipEventCreate
leaq 16(%rsp), %rdi
callq hipEventCreate
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq %rax, 160(%rsp)
movq %rcx, 152(%rsp)
movq %rdx, 144(%rsp)
leaq 160(%rsp), %rax
movq %rax, 64(%rsp)
leaq 152(%rsp), %rax
movq %rax, 72(%rsp)
leaq 144(%rsp), %rax
movq %rax, 80(%rsp)
leaq 128(%rsp), %rdi
leaq 112(%rsp), %rsi
leaq 104(%rsp), %rdx
leaq 96(%rsp), %rcx
callq __hipPopCallConfiguration
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
movq 112(%rsp), %rcx
movl 120(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z10add_kernelPiS_S_, %edi
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
pushq 112(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 16(%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movq 16(%rsp), %rdx
leaq 64(%rsp), %rdi
callq hipEventElapsedTime
movss 64(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
movq 24(%rsp), %rdi
callq hipEventDestroy
movq 16(%rsp), %rdi
callq hipEventDestroy
movq 32(%rsp), %rsi
leaq 12(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl 12(%rsp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movq 48(%rsp), %rdi
testq %rdi, %rdi
je .LBB1_4
# %bb.3:
callq hipFree
.LBB1_4:
movq 40(%rsp), %rdi
testq %rdi, %rdi
je .LBB1_6
# %bb.5:
callq hipFree
.LBB1_6:
movq 32(%rsp), %rdi
testq %rdi, %rdi
je .LBB1_8
# %bb.7:
callq hipFree
.LBB1_8:
xorl %eax, %eax
addq $168, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10add_kernelPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10add_kernelPiS_S_,@object # @_Z10add_kernelPiS_S_
.section .rodata,"a",@progbits
.globl _Z10add_kernelPiS_S_
.p2align 3, 0x0
_Z10add_kernelPiS_S_:
.quad _Z25__device_stub__add_kernelPiS_S_
.size _Z10add_kernelPiS_S_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "Time to generate: %3.1f ms \n"
.size .L.str.1, 30
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Result is: %d\n"
.size .L.str.2, 15
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10add_kernelPiS_S_"
.size .L__unnamed_1, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "My First CUDA Application"
.size .Lstr, 26
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__add_kernelPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10add_kernelPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z10add_kernelPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff027624 */
/* 0x000fe200078e00ff */
/*0020*/ MOV R5, c[0x0][0x16c] ; /* 0x00005b0000057a02 */
/* 0x000fe20000000f00 */
/*0030*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff047624 */
/* 0x000fe200078e00ff */
/*0040*/ MOV R3, c[0x0][0x164] ; /* 0x0000590000037a02 */
/* 0x000fe20000000f00 */
/*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0060*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff067624 */
/* 0x000fe200078e00ff */
/*0090*/ MOV R7, c[0x0][0x174] ; /* 0x00005d0000077a02 */
/* 0x000fe40000000f00 */
/*00a0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00b0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10add_kernelPiS_S_
.globl _Z10add_kernelPiS_S_
.p2align 8
.type _Z10add_kernelPiS_S_,@function
_Z10add_kernelPiS_S_:                         ; gfx1100 code for: *c = *a + *b
	s_clause 0x1
	s_load_b128 s[4:7], s[0:1], 0x0       ; kernarg bytes 0..15: s[4:5]=a, s[6:7]=b
	s_load_b64 s[0:1], s[0:1], 0x10       ; kernarg bytes 16..23: c (reuses kernarg-ptr regs)
	s_waitcnt lgkmcnt(0)
	s_load_b32 s2, s[4:5], 0x0            ; s2 = *a
	s_load_b32 s3, s[6:7], 0x0            ; s3 = *b
	s_waitcnt lgkmcnt(0)
	s_add_i32 s2, s3, s2                  ; s2 = *b + *a
	s_delay_alu instid0(SALU_CYCLE_1)
	v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2   ; v0 = 0 (store offset), v1 = sum
	global_store_b32 v0, v1, s[0:1]       ; *c = sum
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)  ; release VGPRs before program end
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10add_kernelPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10add_kernelPiS_S_, .Lfunc_end0-_Z10add_kernelPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10add_kernelPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z10add_kernelPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00012949_00000000-6_add.cudafe1.cpp"
.text
#APP
#NO_APP
	# atexit handler: unregisters this TU's fat binary from the CUDA
	# runtime using the handle cached by the module constructor.
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
	.cfi_startproc
	endbr64
	subq	$8, %rsp		# re-align stack to 16 bytes for the call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2060:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z34__device_stub__Z10add_kernelPiS_S_PiS_S_
.type _Z34__device_stub__Z10add_kernelPiS_S_PiS_S_, @function
_Z34__device_stub__Z10add_kernelPiS_S_PiS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10add_kernelPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z34__device_stub__Z10add_kernelPiS_S_PiS_S_, .-_Z34__device_stub__Z10add_kernelPiS_S_PiS_S_
	# Host-side entry for the kernel symbol: a call through this address
	# tail-routes into the device stub, which pops the pushed launch
	# configuration and invokes cudaLaunchKernel. The three pointer
	# arguments are left untouched in %rdi/%rsi/%rdx for the stub.
	.globl	_Z10add_kernelPiS_S_
	.type	_Z10add_kernelPiS_S_, @function
_Z10add_kernelPiS_S_:
.LFB2083:
	.cfi_startproc
	endbr64
	subq	$8, %rsp		# re-align stack to 16 bytes for the call
	.cfi_def_cfa_offset 16
	call	_Z34__device_stub__Z10add_kernelPiS_S_PiS_S_
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2083:
	.size	_Z10add_kernelPiS_S_, .-_Z10add_kernelPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "My First CUDA Application\n"
.LC1:
.string "Time to generate: %3.1f ms \n"
.LC2:
.string "Result is: %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq .LC0(%rip), %rsi
movl $2, %edi
call __printf_chk@PLT
movl $10, 12(%rsp)
movl $20, 16(%rsp)
movl $0, 20(%rsp)
leaq 24(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
leaq 12(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
leaq 16(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
leaq 48(%rsp), %rdi
call cudaEventCreate@PLT
leaq 56(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 48(%rsp), %rdi
call cudaEventRecord@PLT
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 76(%rsp), %rdx
movl $1, %ecx
movq 64(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L12:
call cudaDeviceSynchronize@PLT
movl $0, %esi
movq 56(%rsp), %rdi
call cudaEventRecord@PLT
movq 56(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 76(%rsp), %rdi
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 76(%rsp), %xmm0
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 48(%rsp), %rdi
call cudaEventDestroy@PLT
movq 56(%rsp), %rdi
call cudaEventDestroy@PLT
leaq 20(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 40(%rsp), %rsi
call cudaMemcpy@PLT
movl 20(%rsp), %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 24(%rsp), %rdi
testq %rdi, %rdi
je .L13
call cudaFree@PLT
.L13:
movq 32(%rsp), %rdi
testq %rdi, %rdi
je .L14
call cudaFree@PLT
.L14:
movq 40(%rsp), %rdi
testq %rdi, %rdi
je .L15
call cudaFree@PLT
.L15:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L19
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z34__device_stub__Z10add_kernelPiS_S_PiS_S_
jmp .L12
.L19:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z10add_kernelPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z10add_kernelPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "add.hip"
.globl _Z25__device_stub__add_kernelPiS_S_ # -- Begin function _Z25__device_stub__add_kernelPiS_S_
.p2align 4, 0x90
.type _Z25__device_stub__add_kernelPiS_S_,@function
_Z25__device_stub__add_kernelPiS_S_: # @_Z25__device_stub__add_kernelPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10add_kernelPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z25__device_stub__add_kernelPiS_S_, .Lfunc_end0-_Z25__device_stub__add_kernelPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movl $.Lstr, %edi
callq puts@PLT
movl $10, 60(%rsp)
movl $20, 56(%rsp)
movl $0, 12(%rsp)
leaq 48(%rsp), %rdi
movl $4, %esi
callq hipMalloc
leaq 40(%rsp), %rdi
movl $4, %esi
callq hipMalloc
leaq 32(%rsp), %rdi
movl $4, %esi
callq hipMalloc
movq 48(%rsp), %rdi
leaq 60(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movq 40(%rsp), %rdi
leaq 56(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
leaq 24(%rsp), %rdi
callq hipEventCreate
leaq 16(%rsp), %rdi
callq hipEventCreate
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq %rax, 160(%rsp)
movq %rcx, 152(%rsp)
movq %rdx, 144(%rsp)
leaq 160(%rsp), %rax
movq %rax, 64(%rsp)
leaq 152(%rsp), %rax
movq %rax, 72(%rsp)
leaq 144(%rsp), %rax
movq %rax, 80(%rsp)
leaq 128(%rsp), %rdi
leaq 112(%rsp), %rsi
leaq 104(%rsp), %rdx
leaq 96(%rsp), %rcx
callq __hipPopCallConfiguration
movq 128(%rsp), %rsi
movl 136(%rsp), %edx
movq 112(%rsp), %rcx
movl 120(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z10add_kernelPiS_S_, %edi
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
pushq 112(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 16(%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movq 16(%rsp), %rdx
leaq 64(%rsp), %rdi
callq hipEventElapsedTime
movss 64(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
movq 24(%rsp), %rdi
callq hipEventDestroy
movq 16(%rsp), %rdi
callq hipEventDestroy
movq 32(%rsp), %rsi
leaq 12(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl 12(%rsp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movq 48(%rsp), %rdi
testq %rdi, %rdi
je .LBB1_4
# %bb.3:
callq hipFree
.LBB1_4:
movq 40(%rsp), %rdi
testq %rdi, %rdi
je .LBB1_6
# %bb.5:
callq hipFree
.LBB1_6:
movq 32(%rsp), %rdi
testq %rdi, %rdi
je .LBB1_8
# %bb.7:
callq hipFree
.LBB1_8:
xorl %eax, %eax
addq $168, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10add_kernelPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10add_kernelPiS_S_,@object # @_Z10add_kernelPiS_S_
.section .rodata,"a",@progbits
.globl _Z10add_kernelPiS_S_
.p2align 3, 0x0
_Z10add_kernelPiS_S_:
.quad _Z25__device_stub__add_kernelPiS_S_
.size _Z10add_kernelPiS_S_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "Time to generate: %3.1f ms \n"
.size .L.str.1, 30
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Result is: %d\n"
.size .L.str.2, 15
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10add_kernelPiS_S_"
.size .L__unnamed_1, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "My First CUDA Application"
.size .Lstr, 26
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__add_kernelPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10add_kernelPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <algorithm>
#include <cstdlib>
int main(void)
{
// generate 100 random numbers serially
thrust::host_vector<int> h_vec(100);
std::generate(h_vec.begin(), h_vec.end(), rand);
// transfer data to the device
thrust::device_vector<int> d_vec = h_vec;
// sort data on the device
thrust::sort(d_vec.begin(), d_vec.end());
//@@ transfer data back to host
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
// print h_vec
for (int i = 0; i < h_vec.size(); i++) {
printf("h_vec [%d] = %d\n", i, h_vec[i]);
}
return 0;
} |
#include <hip/hip_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <algorithm>
#include <cstdlib>
int main(void)
{
// generate 100 random numbers serially
thrust::host_vector<int> h_vec(100);
std::generate(h_vec.begin(), h_vec.end(), rand);
// transfer data to the device
thrust::device_vector<int> d_vec = h_vec;
// sort data on the device
thrust::sort(d_vec.begin(), d_vec.end());
//@@ transfer data back to host
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
// print h_vec
for (int i = 0; i < h_vec.size(); i++) {
printf("h_vec [%d] = %d\n", i, h_vec[i]);
}
return 0;
} |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #define BLOCK_SIZE 256
#define SOFTENING 1e-9f
typedef struct { float4 *pos, *vel; } BodySystem;
void randomizeBodies(float *data, int n) {
for (int i = 0; i < n; i++) {
data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
}
}
__global__
void applyForce(float4 *p, float4 *v, float4 *d, float dt, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
dt = 0.00001f;
if (i < n) {
d[i].x = dt; d[i].y = n;
float Fx = 0.0f; float Fy = 0.0f;
for (int tile = 0; tile < gridDim.x; tile++) {
__shared__ float2 spos[BLOCK_SIZE];
float4 tpos = p[tile * blockDim.x + threadIdx.x];
spos[threadIdx.x] = make_float2(tpos.x, tpos.y);
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; j++) {
float dx = spos[j].x - p[i].x;
float dy = spos[j].y - p[i].y;
float distSqr = dx*dx + dy*dy + SOFTENING;
float invDist = rsqrtf(distSqr);
float invDist3 = invDist * invDist * invDist;
float strength = 1.0f;
Fx += dx * invDist3 * strength; Fy += dy * invDist3 * strength;
}
__syncthreads();
}
//v[i].x += 1.5f;
v[i].x += dt*Fx; v[i].y += dt*Fy;
p[i].x += v[i].x*dt; p[i].y += v[i].y*dt;
}
} | .file "tmpxft_000a9c1f_00000000-6_nbody.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z15randomizeBodiesPfi
.type _Z15randomizeBodiesPfi, @function
_Z15randomizeBodiesPfi:
.LFB2027:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L5:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC0(%rip), %xmm0
addss %xmm0, %xmm0
subss .LC1(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE2027:
.size _Z15randomizeBodiesPfi, .-_Z15randomizeBodiesPfi
.globl _Z44__device_stub__Z10applyForceP6float4S0_S0_fiP6float4S0_S0_fi
.type _Z44__device_stub__Z10applyForceP6float4S0_S0_fiP6float4S0_S0_fi, @function
_Z44__device_stub__Z10applyForceP6float4S0_S0_fiP6float4S0_S0_fi:
.LFB2052:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movss %xmm0, 4(%rsp)
movl %ecx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10applyForceP6float4S0_S0_fi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z44__device_stub__Z10applyForceP6float4S0_S0_fiP6float4S0_S0_fi, .-_Z44__device_stub__Z10applyForceP6float4S0_S0_fiP6float4S0_S0_fi
.globl _Z10applyForceP6float4S0_S0_fi
.type _Z10applyForceP6float4S0_S0_fi, @function
_Z10applyForceP6float4S0_S0_fi:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z44__device_stub__Z10applyForceP6float4S0_S0_fiP6float4S0_S0_fi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z10applyForceP6float4S0_S0_fi, .-_Z10applyForceP6float4S0_S0_fi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "_Z10applyForceP6float4S0_S0_fi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z10applyForceP6float4S0_S0_fi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 805306368
.align 4
.LC1:
.long 1065353216
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #define BLOCK_SIZE 256
#define SOFTENING 1e-9f
typedef struct { float4 *pos, *vel; } BodySystem;
void randomizeBodies(float *data, int n) {
for (int i = 0; i < n; i++) {
data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
}
}
__global__
void applyForce(float4 *p, float4 *v, float4 *d, float dt, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
dt = 0.00001f;
if (i < n) {
d[i].x = dt; d[i].y = n;
float Fx = 0.0f; float Fy = 0.0f;
for (int tile = 0; tile < gridDim.x; tile++) {
__shared__ float2 spos[BLOCK_SIZE];
float4 tpos = p[tile * blockDim.x + threadIdx.x];
spos[threadIdx.x] = make_float2(tpos.x, tpos.y);
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; j++) {
float dx = spos[j].x - p[i].x;
float dy = spos[j].y - p[i].y;
float distSqr = dx*dx + dy*dy + SOFTENING;
float invDist = rsqrtf(distSqr);
float invDist3 = invDist * invDist * invDist;
float strength = 1.0f;
Fx += dx * invDist3 * strength; Fy += dy * invDist3 * strength;
}
__syncthreads();
}
//v[i].x += 1.5f;
v[i].x += dt*Fx; v[i].y += dt*Fy;
p[i].x += v[i].x*dt; p[i].y += v[i].y*dt;
}
} | #include <hip/hip_runtime.h>
#define BLOCK_SIZE 256
#define SOFTENING 1e-9f
typedef struct { float4 *pos, *vel; } BodySystem;
void randomizeBodies(float *data, int n) {
for (int i = 0; i < n; i++) {
data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
}
}
__global__
void applyForce(float4 *p, float4 *v, float4 *d, float dt, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
dt = 0.00001f;
if (i < n) {
d[i].x = dt; d[i].y = n;
float Fx = 0.0f; float Fy = 0.0f;
for (int tile = 0; tile < gridDim.x; tile++) {
__shared__ float2 spos[BLOCK_SIZE];
float4 tpos = p[tile * blockDim.x + threadIdx.x];
spos[threadIdx.x] = make_float2(tpos.x, tpos.y);
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; j++) {
float dx = spos[j].x - p[i].x;
float dy = spos[j].y - p[i].y;
float distSqr = dx*dx + dy*dy + SOFTENING;
float invDist = rsqrtf(distSqr);
float invDist3 = invDist * invDist * invDist;
float strength = 1.0f;
Fx += dx * invDist3 * strength; Fy += dy * invDist3 * strength;
}
__syncthreads();
}
//v[i].x += 1.5f;
v[i].x += dt*Fx; v[i].y += dt*Fy;
p[i].x += v[i].x*dt; p[i].y += v[i].y*dt;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#define BLOCK_SIZE 256
#define SOFTENING 1e-9f
typedef struct { float4 *pos, *vel; } BodySystem;
void randomizeBodies(float *data, int n) {
for (int i = 0; i < n; i++) {
data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f;
}
}
__global__
void applyForce(float4 *p, float4 *v, float4 *d, float dt, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
dt = 0.00001f;
if (i < n) {
d[i].x = dt; d[i].y = n;
float Fx = 0.0f; float Fy = 0.0f;
for (int tile = 0; tile < gridDim.x; tile++) {
__shared__ float2 spos[BLOCK_SIZE];
float4 tpos = p[tile * blockDim.x + threadIdx.x];
spos[threadIdx.x] = make_float2(tpos.x, tpos.y);
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; j++) {
float dx = spos[j].x - p[i].x;
float dy = spos[j].y - p[i].y;
float distSqr = dx*dx + dy*dy + SOFTENING;
float invDist = rsqrtf(distSqr);
float invDist3 = invDist * invDist * invDist;
float strength = 1.0f;
Fx += dx * invDist3 * strength; Fy += dy * invDist3 * strength;
}
__syncthreads();
}
//v[i].x += 1.5f;
v[i].x += dt*Fx; v[i].y += dt*Fy;
p[i].x += v[i].x*dt; p[i].y += v[i].y*dt;
}
} | .text
.file "nbody.hip"
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z15randomizeBodiesPfi
.LCPI0_0:
.long 0x30000000 # float 4.65661287E-10
.LCPI0_1:
.long 0xbf800000 # float -1
.text
.globl _Z15randomizeBodiesPfi
.p2align 4, 0x90
.type _Z15randomizeBodiesPfi,@function
_Z15randomizeBodiesPfi: # @_Z15randomizeBodiesPfi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB0_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI0_0(%rip), %xmm0
addss %xmm0, %xmm0
addss .LCPI0_1(%rip), %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB0_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB0_4: # %._crit_edge
retq
.Lfunc_end0:
.size _Z15randomizeBodiesPfi, .Lfunc_end0-_Z15randomizeBodiesPfi
.cfi_endproc
# -- End function
.globl _Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi # -- Begin function _Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi
.p2align 4, 0x90
.type _Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi,@function
_Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi: # @_Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movss %xmm0, 4(%rsp)
movl %ecx, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi, .Lfunc_end1-_Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi,@object # @_Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi
.section .rodata,"a",@progbits
.globl _Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi
.p2align 3, 0x0
_Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi:
.quad _Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi
.size _Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi"
.size .L__unnamed_1, 48
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000a9c1f_00000000-6_nbody.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z15randomizeBodiesPfi
.type _Z15randomizeBodiesPfi, @function
_Z15randomizeBodiesPfi:
.LFB2027:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L5:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC0(%rip), %xmm0
addss %xmm0, %xmm0
subss .LC1(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE2027:
.size _Z15randomizeBodiesPfi, .-_Z15randomizeBodiesPfi
.globl _Z44__device_stub__Z10applyForceP6float4S0_S0_fiP6float4S0_S0_fi
.type _Z44__device_stub__Z10applyForceP6float4S0_S0_fiP6float4S0_S0_fi, @function
_Z44__device_stub__Z10applyForceP6float4S0_S0_fiP6float4S0_S0_fi:
.LFB2052:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movss %xmm0, 4(%rsp)
movl %ecx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10applyForceP6float4S0_S0_fi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z44__device_stub__Z10applyForceP6float4S0_S0_fiP6float4S0_S0_fi, .-_Z44__device_stub__Z10applyForceP6float4S0_S0_fiP6float4S0_S0_fi
.globl _Z10applyForceP6float4S0_S0_fi
.type _Z10applyForceP6float4S0_S0_fi, @function
_Z10applyForceP6float4S0_S0_fi:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z44__device_stub__Z10applyForceP6float4S0_S0_fiP6float4S0_S0_fi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z10applyForceP6float4S0_S0_fi, .-_Z10applyForceP6float4S0_S0_fi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "_Z10applyForceP6float4S0_S0_fi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z10applyForceP6float4S0_S0_fi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 805306368
.align 4
.LC1:
.long 1065353216
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "nbody.hip"
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z15randomizeBodiesPfi
.LCPI0_0:
.long 0x30000000 # float 4.65661287E-10
.LCPI0_1:
.long 0xbf800000 # float -1
.text
.globl _Z15randomizeBodiesPfi
.p2align 4, 0x90
.type _Z15randomizeBodiesPfi,@function
_Z15randomizeBodiesPfi: # @_Z15randomizeBodiesPfi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB0_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI0_0(%rip), %xmm0
addss %xmm0, %xmm0
addss .LCPI0_1(%rip), %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB0_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB0_4: # %._crit_edge
retq
.Lfunc_end0:
.size _Z15randomizeBodiesPfi, .Lfunc_end0-_Z15randomizeBodiesPfi
.cfi_endproc
# -- End function
.globl _Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi # -- Begin function _Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi
.p2align 4, 0x90
.type _Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi,@function
_Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi: # @_Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movss %xmm0, 4(%rsp)
movl %ecx, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi, .Lfunc_end1-_Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi,@object # @_Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi
.section .rodata,"a",@progbits
.globl _Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi
.p2align 3, 0x0
_Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi:
.quad _Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi
.size _Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi"
.size .L__unnamed_1, 48
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__applyForceP15HIP_vector_typeIfLj4EES1_S1_fi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10applyForceP15HIP_vector_typeIfLj4EES1_S1_fi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <iostream>
#include <cmath>
#include <string>
using namespace std;
//#define THREADS_PER_BLOCK 32
#define Mask_width 3
#define Mask_radius Mask_width/2
#define TILE_WIDTH 32
#define w (TILE_WIDTH + Mask_width - 1)
#define clamp(x) (min(max((x), 0), 255))
void fillMatrix(int* a, int n)
{
int i;
for (i = 0; i < n*n; ++i)
a[i] = 10;//rand()%5;
}
__global__
void matrixAdition(int *c, int *a, int *b,int n)
{
int ij = threadIdx.x + blockDim.x * blockIdx.x;
if(ij<(n*n))
c[ij] = a[ij] + b[ij];
}
__global__
void matrixAditionRow(int *c, int *a, int *b,int n)
{
int ij = threadIdx.x + blockDim.x * blockIdx.x;
// if(blockDim.x != 0)
//printf("%d salida\n", ij);
for(int i =0 ;i<n;i++)
{
if(ij<n)
c[ij*n+i] = a[ij*n+i] + b[ij*n+i];
}
}
__global__
void convolution_1D_basic_kernel(int *R, int *G, int *B , int *M, int *sd_R, int *sd_G, int *sd_B, int Mask_Width , int Width)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int r = 0;
int g = 0;
int b = 0;
int N_start_point = i - (Mask_Width/2);
for (int j = 0; j < Mask_Width; j++)
{
if (N_start_point + j >= 0 && N_start_point + j < Width)
{
r += R[N_start_point + j]*M[j];
g += G[N_start_point + j]*M[j];
b += B[N_start_point + j]*M[j];
}
}
sd_R[i] = r;
sd_G[i] = g;
sd_B[i] = b;
}
__global__
void convolution(int *I, const int* __restrict__ M, int *P, int channels, int width, int height)
{
__shared__ int N_ds[w][w];
int k;
for (k = 0; k < channels; k++) {
// First batch loading
int dest = threadIdx.y * TILE_WIDTH + threadIdx.x,
destY = dest / w, destX = dest % w,
srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius,
srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius,
src = (srcY * width + srcX) * channels + k;
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = I[src];
else
N_ds[destY][destX] = 0;
// Second batch loading
dest = threadIdx.y * TILE_WIDTH + threadIdx.x + TILE_WIDTH * TILE_WIDTH;
destY = dest / w, destX = dest % w;
srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius;
srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius;
src = (srcY * width + srcX) * channels + k;
if (destY < w) {
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = I[src];
else
N_ds[destY][destX] = 0;
}
__syncthreads();
int accum = 0;
int y, x;
for (y = 0; y < Mask_width; y++)
for (x = 0; x < Mask_width; x++)
accum += N_ds[threadIdx.y + y][threadIdx.x + x] * M[y * Mask_width + x];
y = blockIdx.y * TILE_WIDTH + threadIdx.y;
x = blockIdx.x * TILE_WIDTH + threadIdx.x;
if (y < height && x < width)
P[(y * width + x) * channels + k] = clamp(accum);
__syncthreads();
}
}
__global__
void matrixAditionCol(int *c, int *a, int *b,int n)
{
int ij = threadIdx.x + blockDim.x * blockIdx.x;
for(int i =0 ;i<n;i++)
{
if(ij<n)
c[ij+n*i] = a[ij+n*i] + b[ij+n*i];
}
}
void printMatrix(string s, int *a , int tam){
cout<<s;
for(int i=0;i<tam;i++)
{
for(int j=0;j<tam;j++)
{
cout<<a[i*tam+j]<<" ";
}
cout<<endl;
}
}
void ReadPPM(int *Pin, char *name)
{
int e1;
string line,s1;
ifstream file(name);
getline(file,line);
getline(file,line);
getline(file,line);
getline(file,line);
int m=0;
while(!file.eof())
{
file>>e1;
// cout<<e1<<endl;
//if(!e1) break;
Pin[m]=e1;
m++;
}
}
void ReadPPM(int *R,int *G , int *B, char *name)
{
int e1;
string line,s1;
ifstream file(name);
getline(file,line);
getline(file,line);
getline(file,line);
getline(file,line);
int m=0;
while(!file.eof())
{
file>>e1;
R[m]=e1;
file>>e1;
G[m]=e1;
file>>e1;
B[m]=e1;
m++;
}
}
int* ReadSizeImg(char * name)
{
int * dim= new int[2];
int fil, col;
string line,s1;
ifstream file(name);
getline(file,line);
getline(file,line);
file>>fil>>col;
dim[0]=fil; dim[1]=col;
return dim;
}
void WritePGM(int * Pout, int fil , int cols, char *name)
{
ofstream file(name);
file<<"P2"<<endl;
file<<"# Function ConvertRGBtoGray @eddyrene"<<endl;
file<<fil<<" "<<cols<<endl;
file<<255<<endl;
int n = fil*cols;
int i=0;
while(i<n)
{
file<<Pout[i]<<endl;
i++;
}
}
void WritePPM(int * Pout, int fil , int cols, char *name)
{
ofstream file(name);
file<<"P3"<<endl;
file<<"# Function ConvertRGBtoGray @eddyrene"<<endl;
file<<fil<<" "<<cols<<endl;
file<<255<<endl;
int n = fil*cols;
int i=0;
while(i<3*n)
{
file<<Pout[i]<<endl;
i++;
}
}
void WritePPM(int * R, int* G,int *B, int fil , int cols, char *name)
{
ofstream file(name);
file<<"P3"<<endl;
file<<"# Function ConvertRGBtoGray @eddyrene"<<endl;
file<<fil<<" "<<cols<<endl;
file<<255<<endl;
int n = fil*cols;
int i=0;
while(i<n)
{
file<<R[i]<<endl;
file<<G[i]<<endl;
file<<B[i]<<endl;
i++;
}
}
void print_vect(int *V, int n){
int i
; for (i = 0; i < n; i++)
printf("%d ", V[i]);
}
int main(int argc, char *argv[])
{
int * R;//,*G,*B;
int * sR;//,*sG,*sB;
int * d_R;//,*d_G,*d_B;
int * sd_R;//,*sd_G,*sd_B;
int * order = ReadSizeImg("img.pgm");
int N=order[0]; int M=order[1];
int THREADS_PER_BLOCK = 32;
int size =3*N*M*sizeof(int);
cout<<"tamano Imagen "<<N<<" "<<M<<" size "<<size<<endl;
int k[9]={-1,0,1,-2,0,2,-1,0,1};
int *d_k;
cudaMalloc((void **)&d_R, size);
cudaMalloc((void **)&sd_R, size);
cudaMalloc((void **)&d_k,9*sizeof(int));
R = (int *)malloc(size);
ReadPPM(R,"img.pgm");
sR = (int *)malloc(size);
cudaMemcpy(d_R, R, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_k, k, 9*sizeof(int), cudaMemcpyHostToDevice);
int blocks= (N + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK;
dim3 dimGrid(blocks, blocks, 1);
dim3 dimBlock(THREADS_PER_BLOCK,THREADS_PER_BLOCK, 1);
cout<<"blocks : \n"<<blocks<<"\n threds: \n "<<THREADS_PER_BLOCK<<endl;
convolution<<<dimGrid,dimBlock>>>(d_R, d_k ,sd_R,1, N, M);
cudaMemcpy(sR, sd_R, size, cudaMemcpyDeviceToHost);
cout<<"ss"<<endl;
WritePGM(sR, N,M,"siete.ppm");
free(R); //free(G);free(B);
cudaFree(d_R); //cudaFree(d_B);cudaFree(d_G);
cudaFree(sd_R); //cudaFree(sd_B);cudaFree(sd_G);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <iostream>
#include <cmath>
#include <string>
using namespace std;
//#define THREADS_PER_BLOCK 32
#define Mask_width 3
#define Mask_radius Mask_width/2
#define TILE_WIDTH 32
#define w (TILE_WIDTH + Mask_width - 1)
#define clamp(x) (min(max((x), 0), 255))
void fillMatrix(int* a, int n)
{
int i;
for (i = 0; i < n*n; ++i)
a[i] = 10;//rand()%5;
}
__global__
void matrixAdition(int *c, int *a, int *b,int n)
{
int ij = threadIdx.x + blockDim.x * blockIdx.x;
if(ij<(n*n))
c[ij] = a[ij] + b[ij];
}
__global__
void matrixAditionRow(int *c, int *a, int *b,int n)
{
int ij = threadIdx.x + blockDim.x * blockIdx.x;
// if(blockDim.x != 0)
//printf("%d salida\n", ij);
for(int i =0 ;i<n;i++)
{
if(ij<n)
c[ij*n+i] = a[ij*n+i] + b[ij*n+i];
}
}
__global__
void convolution_1D_basic_kernel(int *R, int *G, int *B , int *M, int *sd_R, int *sd_G, int *sd_B, int Mask_Width , int Width)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int r = 0;
int g = 0;
int b = 0;
int N_start_point = i - (Mask_Width/2);
for (int j = 0; j < Mask_Width; j++)
{
if (N_start_point + j >= 0 && N_start_point + j < Width)
{
r += R[N_start_point + j]*M[j];
g += G[N_start_point + j]*M[j];
b += B[N_start_point + j]*M[j];
}
}
sd_R[i] = r;
sd_G[i] = g;
sd_B[i] = b;
}
__global__
void convolution(int *I, const int* __restrict__ M, int *P, int channels, int width, int height)
{
__shared__ int N_ds[w][w];
int k;
for (k = 0; k < channels; k++) {
// First batch loading
int dest = threadIdx.y * TILE_WIDTH + threadIdx.x,
destY = dest / w, destX = dest % w,
srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius,
srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius,
src = (srcY * width + srcX) * channels + k;
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = I[src];
else
N_ds[destY][destX] = 0;
// Second batch loading
dest = threadIdx.y * TILE_WIDTH + threadIdx.x + TILE_WIDTH * TILE_WIDTH;
destY = dest / w, destX = dest % w;
srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius;
srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius;
src = (srcY * width + srcX) * channels + k;
if (destY < w) {
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = I[src];
else
N_ds[destY][destX] = 0;
}
__syncthreads();
int accum = 0;
int y, x;
for (y = 0; y < Mask_width; y++)
for (x = 0; x < Mask_width; x++)
accum += N_ds[threadIdx.y + y][threadIdx.x + x] * M[y * Mask_width + x];
y = blockIdx.y * TILE_WIDTH + threadIdx.y;
x = blockIdx.x * TILE_WIDTH + threadIdx.x;
if (y < height && x < width)
P[(y * width + x) * channels + k] = clamp(accum);
__syncthreads();
}
}
__global__
void matrixAditionCol(int *c, int *a, int *b,int n)
{
int ij = threadIdx.x + blockDim.x * blockIdx.x;
for(int i =0 ;i<n;i++)
{
if(ij<n)
c[ij+n*i] = a[ij+n*i] + b[ij+n*i];
}
}
void printMatrix(string s, int *a , int tam){
cout<<s;
for(int i=0;i<tam;i++)
{
for(int j=0;j<tam;j++)
{
cout<<a[i*tam+j]<<" ";
}
cout<<endl;
}
}
void ReadPPM(int *Pin, char *name)
{
int e1;
string line,s1;
ifstream file(name);
getline(file,line);
getline(file,line);
getline(file,line);
getline(file,line);
int m=0;
while(!file.eof())
{
file>>e1;
// cout<<e1<<endl;
//if(!e1) break;
Pin[m]=e1;
m++;
}
}
void ReadPPM(int *R,int *G , int *B, char *name)
{
int e1;
string line,s1;
ifstream file(name);
getline(file,line);
getline(file,line);
getline(file,line);
getline(file,line);
int m=0;
while(!file.eof())
{
file>>e1;
R[m]=e1;
file>>e1;
G[m]=e1;
file>>e1;
B[m]=e1;
m++;
}
}
int* ReadSizeImg(char * name)
{
int * dim= new int[2];
int fil, col;
string line,s1;
ifstream file(name);
getline(file,line);
getline(file,line);
file>>fil>>col;
dim[0]=fil; dim[1]=col;
return dim;
}
void WritePGM(int * Pout, int fil , int cols, char *name)
{
ofstream file(name);
file<<"P2"<<endl;
file<<"# Function ConvertRGBtoGray @eddyrene"<<endl;
file<<fil<<" "<<cols<<endl;
file<<255<<endl;
int n = fil*cols;
int i=0;
while(i<n)
{
file<<Pout[i]<<endl;
i++;
}
}
void WritePPM(int * Pout, int fil , int cols, char *name)
{
ofstream file(name);
file<<"P3"<<endl;
file<<"# Function ConvertRGBtoGray @eddyrene"<<endl;
file<<fil<<" "<<cols<<endl;
file<<255<<endl;
int n = fil*cols;
int i=0;
while(i<3*n)
{
file<<Pout[i]<<endl;
i++;
}
}
void WritePPM(int * R, int* G,int *B, int fil , int cols, char *name)
{
ofstream file(name);
file<<"P3"<<endl;
file<<"# Function ConvertRGBtoGray @eddyrene"<<endl;
file<<fil<<" "<<cols<<endl;
file<<255<<endl;
int n = fil*cols;
int i=0;
while(i<n)
{
file<<R[i]<<endl;
file<<G[i]<<endl;
file<<B[i]<<endl;
i++;
}
}
void print_vect(int *V, int n){
int i
; for (i = 0; i < n; i++)
printf("%d ", V[i]);
}
int main(int argc, char *argv[])
{
int * R;//,*G,*B;
int * sR;//,*sG,*sB;
int * d_R;//,*d_G,*d_B;
int * sd_R;//,*sd_G,*sd_B;
int * order = ReadSizeImg("img.pgm");
int N=order[0]; int M=order[1];
int THREADS_PER_BLOCK = 32;
int size =3*N*M*sizeof(int);
cout<<"tamano Imagen "<<N<<" "<<M<<" size "<<size<<endl;
int k[9]={-1,0,1,-2,0,2,-1,0,1};
int *d_k;
hipMalloc((void **)&d_R, size);
hipMalloc((void **)&sd_R, size);
hipMalloc((void **)&d_k,9*sizeof(int));
R = (int *)malloc(size);
ReadPPM(R,"img.pgm");
sR = (int *)malloc(size);
hipMemcpy(d_R, R, size, hipMemcpyHostToDevice);
hipMemcpy(d_k, k, 9*sizeof(int), hipMemcpyHostToDevice);
int blocks= (N + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK;
dim3 dimGrid(blocks, blocks, 1);
dim3 dimBlock(THREADS_PER_BLOCK,THREADS_PER_BLOCK, 1);
cout<<"blocks : \n"<<blocks<<"\n threds: \n "<<THREADS_PER_BLOCK<<endl;
convolution<<<dimGrid,dimBlock>>>(d_R, d_k ,sd_R,1, N, M);
hipMemcpy(sR, sd_R, size, hipMemcpyDeviceToHost);
cout<<"ss"<<endl;
WritePGM(sR, N,M,"siete.ppm");
free(R); //free(G);free(B);
hipFree(d_R); //cudaFree(d_B);cudaFree(d_G);
hipFree(sd_R); //cudaFree(sd_B);cudaFree(sd_G);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <iostream>
#include <cmath>
#include <string>
using namespace std;
//#define THREADS_PER_BLOCK 32
#define Mask_width 3
#define Mask_radius Mask_width/2
#define TILE_WIDTH 32
#define w (TILE_WIDTH + Mask_width - 1)
#define clamp(x) (min(max((x), 0), 255))
void fillMatrix(int* a, int n)
{
int i;
for (i = 0; i < n*n; ++i)
a[i] = 10;//rand()%5;
}
__global__
void matrixAdition(int *c, int *a, int *b,int n)
{
int ij = threadIdx.x + blockDim.x * blockIdx.x;
if(ij<(n*n))
c[ij] = a[ij] + b[ij];
}
__global__
void matrixAditionRow(int *c, int *a, int *b,int n)
{
int ij = threadIdx.x + blockDim.x * blockIdx.x;
// if(blockDim.x != 0)
//printf("%d salida\n", ij);
for(int i =0 ;i<n;i++)
{
if(ij<n)
c[ij*n+i] = a[ij*n+i] + b[ij*n+i];
}
}
__global__
void convolution_1D_basic_kernel(int *R, int *G, int *B , int *M, int *sd_R, int *sd_G, int *sd_B, int Mask_Width , int Width)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int r = 0;
int g = 0;
int b = 0;
int N_start_point = i - (Mask_Width/2);
for (int j = 0; j < Mask_Width; j++)
{
if (N_start_point + j >= 0 && N_start_point + j < Width)
{
r += R[N_start_point + j]*M[j];
g += G[N_start_point + j]*M[j];
b += B[N_start_point + j]*M[j];
}
}
sd_R[i] = r;
sd_G[i] = g;
sd_B[i] = b;
}
__global__
void convolution(int *I, const int* __restrict__ M, int *P, int channels, int width, int height)
{
__shared__ int N_ds[w][w];
int k;
for (k = 0; k < channels; k++) {
// First batch loading
int dest = threadIdx.y * TILE_WIDTH + threadIdx.x,
destY = dest / w, destX = dest % w,
srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius,
srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius,
src = (srcY * width + srcX) * channels + k;
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = I[src];
else
N_ds[destY][destX] = 0;
// Second batch loading
dest = threadIdx.y * TILE_WIDTH + threadIdx.x + TILE_WIDTH * TILE_WIDTH;
destY = dest / w, destX = dest % w;
srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius;
srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius;
src = (srcY * width + srcX) * channels + k;
if (destY < w) {
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = I[src];
else
N_ds[destY][destX] = 0;
}
__syncthreads();
int accum = 0;
int y, x;
for (y = 0; y < Mask_width; y++)
for (x = 0; x < Mask_width; x++)
accum += N_ds[threadIdx.y + y][threadIdx.x + x] * M[y * Mask_width + x];
y = blockIdx.y * TILE_WIDTH + threadIdx.y;
x = blockIdx.x * TILE_WIDTH + threadIdx.x;
if (y < height && x < width)
P[(y * width + x) * channels + k] = clamp(accum);
__syncthreads();
}
}
__global__
void matrixAditionCol(int *c, int *a, int *b,int n)
{
int ij = threadIdx.x + blockDim.x * blockIdx.x;
for(int i =0 ;i<n;i++)
{
if(ij<n)
c[ij+n*i] = a[ij+n*i] + b[ij+n*i];
}
}
void printMatrix(string s, int *a , int tam){
cout<<s;
for(int i=0;i<tam;i++)
{
for(int j=0;j<tam;j++)
{
cout<<a[i*tam+j]<<" ";
}
cout<<endl;
}
}
void ReadPPM(int *Pin, char *name)
{
int e1;
string line,s1;
ifstream file(name);
getline(file,line);
getline(file,line);
getline(file,line);
getline(file,line);
int m=0;
while(!file.eof())
{
file>>e1;
// cout<<e1<<endl;
//if(!e1) break;
Pin[m]=e1;
m++;
}
}
void ReadPPM(int *R,int *G , int *B, char *name)
{
int e1;
string line,s1;
ifstream file(name);
getline(file,line);
getline(file,line);
getline(file,line);
getline(file,line);
int m=0;
while(!file.eof())
{
file>>e1;
R[m]=e1;
file>>e1;
G[m]=e1;
file>>e1;
B[m]=e1;
m++;
}
}
int* ReadSizeImg(char * name)
{
int * dim= new int[2];
int fil, col;
string line,s1;
ifstream file(name);
getline(file,line);
getline(file,line);
file>>fil>>col;
dim[0]=fil; dim[1]=col;
return dim;
}
void WritePGM(int * Pout, int fil , int cols, char *name)
{
ofstream file(name);
file<<"P2"<<endl;
file<<"# Function ConvertRGBtoGray @eddyrene"<<endl;
file<<fil<<" "<<cols<<endl;
file<<255<<endl;
int n = fil*cols;
int i=0;
while(i<n)
{
file<<Pout[i]<<endl;
i++;
}
}
void WritePPM(int * Pout, int fil , int cols, char *name)
{
ofstream file(name);
file<<"P3"<<endl;
file<<"# Function ConvertRGBtoGray @eddyrene"<<endl;
file<<fil<<" "<<cols<<endl;
file<<255<<endl;
int n = fil*cols;
int i=0;
while(i<3*n)
{
file<<Pout[i]<<endl;
i++;
}
}
void WritePPM(int * R, int* G,int *B, int fil , int cols, char *name)
{
ofstream file(name);
file<<"P3"<<endl;
file<<"# Function ConvertRGBtoGray @eddyrene"<<endl;
file<<fil<<" "<<cols<<endl;
file<<255<<endl;
int n = fil*cols;
int i=0;
while(i<n)
{
file<<R[i]<<endl;
file<<G[i]<<endl;
file<<B[i]<<endl;
i++;
}
}
void print_vect(int *V, int n){
int i
; for (i = 0; i < n; i++)
printf("%d ", V[i]);
}
int main(int argc, char *argv[])
{
int * R;//,*G,*B;
int * sR;//,*sG,*sB;
int * d_R;//,*d_G,*d_B;
int * sd_R;//,*sd_G,*sd_B;
int * order = ReadSizeImg("img.pgm");
int N=order[0]; int M=order[1];
int THREADS_PER_BLOCK = 32;
int size =3*N*M*sizeof(int);
cout<<"tamano Imagen "<<N<<" "<<M<<" size "<<size<<endl;
int k[9]={-1,0,1,-2,0,2,-1,0,1};
int *d_k;
hipMalloc((void **)&d_R, size);
hipMalloc((void **)&sd_R, size);
hipMalloc((void **)&d_k,9*sizeof(int));
R = (int *)malloc(size);
ReadPPM(R,"img.pgm");
sR = (int *)malloc(size);
hipMemcpy(d_R, R, size, hipMemcpyHostToDevice);
hipMemcpy(d_k, k, 9*sizeof(int), hipMemcpyHostToDevice);
int blocks= (N + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK;
dim3 dimGrid(blocks, blocks, 1);
dim3 dimBlock(THREADS_PER_BLOCK,THREADS_PER_BLOCK, 1);
cout<<"blocks : \n"<<blocks<<"\n threds: \n "<<THREADS_PER_BLOCK<<endl;
convolution<<<dimGrid,dimBlock>>>(d_R, d_k ,sd_R,1, N, M);
hipMemcpy(sR, sd_R, size, hipMemcpyDeviceToHost);
cout<<"ss"<<endl;
WritePGM(sR, N,M,"siete.ppm");
free(R); //free(G);free(B);
hipFree(d_R); //cudaFree(d_B);cudaFree(d_G);
hipFree(sd_R); //cudaFree(sd_B);cudaFree(sd_G);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13matrixAditionPiS_S_i
.globl _Z13matrixAditionPiS_S_i
.p2align 8
.type _Z13matrixAditionPiS_S_i,@function
_Z13matrixAditionPiS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_mul_i32 s3, s3, s3
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s6, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s4, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13matrixAditionPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13matrixAditionPiS_S_i, .Lfunc_end0-_Z13matrixAditionPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z16matrixAditionRowPiS_S_i
.globl _Z16matrixAditionRowPiS_S_i
.p2align 8
.type _Z16matrixAditionRowPiS_S_i,@function
_Z16matrixAditionRowPiS_S_i:
s_load_b32 s8, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s8, 1
s_cbranch_scc1 .LBB1_5
s_clause 0x2
s_load_b32 s9, s[0:1], 0x2c
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s0, s9, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s0, v[0:1]
v_mul_lo_u32 v0, v1, s8
v_cmp_gt_i32_e32 vcc_lo, s8, v1
s_set_inst_prefetch_distance 0x1
s_branch .LBB1_3
.p2align 6
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_add_nc_u32_e32 v0, 1, v0
s_add_i32 s8, s8, -1
s_cmp_eq_u32 s8, 0
s_cbranch_scc1 .LBB1_5
.LBB1_3:
s_and_saveexec_b32 s1, vcc_lo
s_cbranch_execz .LBB1_2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[1:2], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v3, s0, s6, v1
v_add_co_ci_u32_e64 v4, s0, s7, v2, s0
v_add_co_u32 v5, s0, s2, v1
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v6, s0, s3, v2, s0
v_add_co_u32 v1, s0, s4, v1
global_load_b32 v3, v[3:4], off
global_load_b32 v4, v[5:6], off
v_add_co_ci_u32_e64 v2, s0, s5, v2, s0
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v3, v4, v3
global_store_b32 v[1:2], v3, off
s_branch .LBB1_2
.LBB1_5:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16matrixAditionRowPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z16matrixAditionRowPiS_S_i, .Lfunc_end1-_Z16matrixAditionRowPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z27convolution_1D_basic_kernelPiS_S_S_S_S_S_ii
.globl _Z27convolution_1D_basic_kernelPiS_S_S_S_S_S_ii
.p2align 8
.type _Z27convolution_1D_basic_kernelPiS_S_S_S_S_S_ii,@function
_Z27convolution_1D_basic_kernelPiS_S_S_S_S_S_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x4c
s_load_b32 s3, s[0:1], 0x38
v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v3, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_cmp_lt_i32 s3, 1
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_mov_b32_e32 v0, 0
s_cbranch_scc1 .LBB2_5
s_clause 0x1
s_load_b256 s[4:11], s[0:1], 0x0
s_load_b32 s12, s[0:1], 0x3c
s_lshr_b32 s2, s3, 31
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, 0
s_add_i32 s2, s3, s2
v_mov_b32_e32 v6, 0
s_ashr_i32 s2, s2, 1
v_mov_b32_e32 v4, 0
v_subrev_nc_u32_e32 v5, s2, v1
s_set_inst_prefetch_distance 0x1
s_branch .LBB2_3
.p2align 6
.LBB2_2:
s_or_b32 exec_lo, exec_lo, s2
s_add_i32 s3, s3, -1
v_add_nc_u32_e32 v5, 1, v5
s_add_u32 s10, s10, 4
s_addc_u32 s11, s11, 0
s_cmp_eq_u32 s3, 0
s_cbranch_scc1 .LBB2_5
.LBB2_3:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_cmp_lt_i32_e32 vcc_lo, -1, v5
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e64 s2, s12, v5
s_and_b32 s13, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s2, s13
s_cbranch_execz .LBB2_2
v_lshlrev_b64 v[7:8], 2, v[5:6]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v9, vcc_lo, s6, v7
v_add_co_ci_u32_e32 v10, vcc_lo, s7, v8, vcc_lo
v_add_co_u32 v11, vcc_lo, s8, v7
v_add_co_ci_u32_e32 v12, vcc_lo, s9, v8, vcc_lo
v_add_co_u32 v7, vcc_lo, s4, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s5, v8, vcc_lo
global_load_b32 v2, v[9:10], off
global_load_b32 v10, v[11:12], off
global_load_b32 v11, v[7:8], off
s_load_b32 s13, s[10:11], 0x0
s_waitcnt vmcnt(2) lgkmcnt(0)
v_mad_u64_u32 v[7:8], null, v2, s13, v[4:5]
s_waitcnt vmcnt(1)
v_mad_u64_u32 v[8:9], null, v10, s13, v[3:4]
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[2:3], null, s13, v11, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_mov_b32 v4, v7 :: v_dual_mov_b32 v3, v8
v_mov_b32_e32 v0, v2
s_branch .LBB2_2
.LBB2_5:
s_set_inst_prefetch_distance 0x2
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x20
s_load_b64 s[0:1], s[0:1], 0x30
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v5, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v2, vcc_lo
v_add_co_u32 v7, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v2, vcc_lo
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[5:6], v0, off
global_store_b32 v[7:8], v4, off
global_store_b32 v[1:2], v3, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z27convolution_1D_basic_kernelPiS_S_S_S_S_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 320
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 13
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z27convolution_1D_basic_kernelPiS_S_S_S_S_S_ii, .Lfunc_end2-_Z27convolution_1D_basic_kernelPiS_S_S_S_S_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z11convolutionPiPKiS_iii
.globl _Z11convolutionPiPKiS_iii
.p2align 8
.type _Z11convolutionPiPKiS_iii,@function
_Z11convolutionPiPKiS_iii:
s_load_b32 s5, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s5, 1
s_cbranch_scc1 .LBB3_18
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v8, v0, 10, 10
s_clause 0x2
s_load_b64 s[16:17], s[0:1], 0x1c
s_load_b128 s[8:11], s[0:1], 0x0
s_load_b64 s[6:7], s[0:1], 0x10
s_lshl_b32 s0, s15, 5
s_lshl_b32 s1, s14, 5
v_lshl_add_u32 v3, v8, 5, v1
v_add_nc_u32_e32 v6, s0, v8
v_add_nc_u32_e32 v0, s1, v1
v_lshlrev_b32_e32 v9, 2, v1
s_add_i32 s2, s0, -1
v_mul_hi_u32 v2, v3, 0xf0f0f0f1
v_add_nc_u32_e32 v4, 0x400, v3
v_cmp_gt_u32_e32 vcc_lo, 0x84, v3
s_add_i32 s3, s1, -1
s_mov_b32 s12, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_mul_hi_u32 v5, v4, 0xf0f0f0f1
v_lshrrev_b32_e32 v7, 5, v2
s_waitcnt lgkmcnt(0)
v_mad_u64_u32 v[1:2], null, v6, s16, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_mul_lo_u32 v10, v7, 34
v_lshrrev_b32_e32 v5, 5, v5
v_cmp_gt_i32_e64 s1, s16, v0
v_add_nc_u32_e32 v11, s2, v7
v_cmp_gt_i32_e64 s0, s17, v6
v_mul_lo_u32 v2, v7, 0x88
v_mul_lo_u32 v12, v5, 34
v_mul_lo_u32 v13, v5, 0x88
v_sub_nc_u32_e32 v3, v3, v10
v_add_nc_u32_e32 v10, s2, v5
s_and_b32 s13, s1, s0
v_cmp_le_i32_e64 s1, s17, v11
v_cmp_gt_i32_e64 s0, 0, v11
v_add_nc_u32_e32 v0, s3, v3
v_sub_nc_u32_e32 v12, v4, v12
v_cmp_gt_i32_e64 s2, s17, v10
v_lshl_add_u32 v2, v3, 2, v2
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_mad_u64_u32 v[5:6], null, v11, s16, v[0:1]
v_add_nc_u32_e32 v4, s3, v12
v_cmp_le_i32_e64 s3, s16, v0
v_cmp_gt_i32_e64 s4, 0, v0
v_lshl_add_u32 v3, v12, 2, v13
s_delay_alu instid0(VALU_DEP_4)
v_or_b32_e32 v0, v10, v4
v_mad_u64_u32 v[6:7], null, v10, s16, v[4:5]
s_or_b32 s3, s1, s3
v_cmp_gt_i32_e64 s1, s16, v4
v_mul_lo_u32 v4, v5, s5
s_or_b32 s4, s3, s4
v_cmp_lt_i32_e64 s3, -1, v0
v_mad_u32_u24 v7, v8, 0x88, v9
v_mov_b32_e32 v8, 0
v_mul_lo_u32 v5, v6, s5
v_mul_lo_u32 v6, v1, s5
s_and_b32 s1, s2, s1
s_or_b32 s4, s0, s4
s_and_b32 s14, s1, s3
s_branch .LBB3_3
.LBB3_2:
s_or_b32 exec_lo, exec_lo, s1
s_add_i32 s12, s12, 1
s_waitcnt_vscnt null, 0x0
s_cmp_lg_u32 s12, s5
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB3_18
.LBB3_3:
s_and_saveexec_b32 s0, s4
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s0, exec_lo, s0
s_cbranch_execz .LBB3_5
ds_store_b32 v2, v8
.LBB3_5:
s_and_not1_saveexec_b32 s1, s0
s_cbranch_execz .LBB3_7
v_add_nc_u32_e32 v0, s12, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v0, s0, s8, v0
v_add_co_ci_u32_e64 v1, s0, s9, v1, s0
global_load_b32 v0, v[0:1], off
s_waitcnt vmcnt(0)
ds_store_b32 v2, v0
.LBB3_7:
s_or_b32 exec_lo, exec_lo, s1
s_and_saveexec_b32 s1, vcc_lo
s_cbranch_execz .LBB3_12
s_and_saveexec_b32 s0, s14
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s2, exec_lo, s0
s_cbranch_execz .LBB3_10
v_add_nc_u32_e32 v0, s12, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v0, s0, s8, v0
v_add_co_ci_u32_e64 v1, s0, s9, v1, s0
global_load_b32 v0, v[0:1], off
s_waitcnt vmcnt(0)
ds_store_b32 v3, v0
.LBB3_10:
s_and_not1_saveexec_b32 s0, s2
s_cbranch_execz .LBB3_12
ds_store_b32 v3, v8
.LBB3_12:
s_or_b32 exec_lo, exec_lo, s1
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v9, v7
s_mov_b32 s15, 0
s_mov_b64 s[0:1], s[10:11]
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
.p2align 6
.LBB3_13:
s_mov_b64 s[2:3], s[0:1]
s_mov_b32 s16, 0
.LBB3_14:
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_dual_mov_b32 v10, v0 :: v_dual_add_nc_u32 v1, s16, v9
s_load_b32 s17, s[2:3], 0x0
s_add_i32 s16, s16, 4
s_add_u32 s2, s2, 4
ds_load_b32 v11, v1
s_addc_u32 s3, s3, 0
s_cmp_lg_u32 s16, 12
s_waitcnt lgkmcnt(0)
v_mad_u64_u32 v[0:1], null, s17, v11, v[10:11]
s_cbranch_scc1 .LBB3_14
s_add_i32 s15, s15, 1
v_add_nc_u32_e32 v9, 0x88, v9
s_add_u32 s0, s0, 12
s_addc_u32 s1, s1, 0
s_cmp_lg_u32 s15, 3
s_cbranch_scc1 .LBB3_13
s_and_saveexec_b32 s1, s13
s_cbranch_execz .LBB3_2
v_add_nc_u32_e32 v9, s12, v6
v_med3_i32 v11, v0, 0, 0xff
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v10, 31, v9
v_lshlrev_b64 v[9:10], 2, v[9:10]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v0, s0, s6, v9
v_add_co_ci_u32_e64 v1, s0, s7, v10, s0
global_store_b32 v[0:1], v11, off
s_branch .LBB3_2
.LBB3_18:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11convolutionPiPKiS_iii
.amdhsa_group_segment_fixed_size 4624
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 36
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 14
.amdhsa_next_free_sgpr 18
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z11convolutionPiPKiS_iii, .Lfunc_end3-_Z11convolutionPiPKiS_iii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z16matrixAditionColPiS_S_i
.globl _Z16matrixAditionColPiS_S_i
.p2align 8
.type _Z16matrixAditionColPiS_S_i,@function
_Z16matrixAditionColPiS_S_i:
s_load_b32 s8, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s8, 1
s_cbranch_scc1 .LBB4_5
s_clause 0x2
s_load_b32 s9, s[0:1], 0x2c
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x10
s_mov_b32 s1, s8
s_waitcnt lgkmcnt(0)
s_and_b32 s0, s9, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s0, v[0:1]
v_cmp_gt_i32_e32 vcc_lo, s8, v1
s_set_inst_prefetch_distance 0x1
s_branch .LBB4_3
.p2align 6
.LBB4_2:
s_or_b32 exec_lo, exec_lo, s9
v_add_nc_u32_e32 v1, s8, v1
s_add_i32 s1, s1, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s1, 0
s_cbranch_scc1 .LBB4_5
.LBB4_3:
s_and_saveexec_b32 s9, vcc_lo
s_cbranch_execz .LBB4_2
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v4, s0, s6, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v5, s0, s7, v3, s0
v_add_co_u32 v6, s0, s2, v2
v_add_co_ci_u32_e64 v7, s0, s3, v3, s0
v_add_co_u32 v2, s0, s4, v2
global_load_b32 v0, v[4:5], off
global_load_b32 v4, v[6:7], off
v_add_co_ci_u32_e64 v3, s0, s5, v3, s0
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v0, v4, v0
global_store_b32 v[2:3], v0, off
s_branch .LBB4_2
.LBB4_5:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16matrixAditionColPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end4:
.size _Z16matrixAditionColPiS_S_i, .Lfunc_end4-_Z16matrixAditionColPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13matrixAditionPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13matrixAditionPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16matrixAditionRowPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16matrixAditionRowPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .offset: 56
.size: 4
.value_kind: by_value
- .offset: 60
.size: 4
.value_kind: by_value
- .offset: 64
.size: 4
.value_kind: hidden_block_count_x
- .offset: 68
.size: 4
.value_kind: hidden_block_count_y
- .offset: 72
.size: 4
.value_kind: hidden_block_count_z
- .offset: 76
.size: 2
.value_kind: hidden_group_size_x
- .offset: 78
.size: 2
.value_kind: hidden_group_size_y
- .offset: 80
.size: 2
.value_kind: hidden_group_size_z
- .offset: 82
.size: 2
.value_kind: hidden_remainder_x
- .offset: 84
.size: 2
.value_kind: hidden_remainder_y
- .offset: 86
.size: 2
.value_kind: hidden_remainder_z
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 128
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 320
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z27convolution_1D_basic_kernelPiS_S_S_S_S_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z27convolution_1D_basic_kernelPiS_S_S_S_S_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 13
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .actual_access: read_only
.address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 4624
.kernarg_segment_align: 8
.kernarg_segment_size: 36
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11convolutionPiPKiS_iii
.private_segment_fixed_size: 0
.sgpr_count: 20
.sgpr_spill_count: 0
.symbol: _Z11convolutionPiPKiS_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 14
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16matrixAditionColPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16matrixAditionColPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <iostream>
#include <cuda.h>
#define NUMBER_RUNS 1
#define LCG_A 1103515245
#define LCG_C 12345
#define LCG_M 2147483646
#define MAX_TRIES 1000
#define N_LIMIT 20
#define MAX_TEMP_STEPS 100
#define TEMP_START 0.5
#define COOLING 0.99
#define THREADS 1024
#define BOLTZMANN_COEFF 0.01
using namespace std;
// A city is a point in the plane; tour edges between cities use the
// rounded Euclidean metric (see euclideanDistance / nint below).
struct city {
double x;
double y;
};
// One candidate tour: total rounded edge length (cost), the visiting
// order (indices into the city array), and the number of accepted
// annealing moves performed on it (nSucc).
struct permutation {
int cost;
int* order;
int nSucc;
};
// Read-only solver parameters mirrored into GPU constant memory:
// problem size, device-side city array, and the RNG seed array
// (sized THREADS on the host side).
struct GlobalConstants {
int CITY_N;
city* cities;
unsigned int* randSeeds;
};
//global variables
struct city *cities;
int CITY_N;
//global variables on GPU
__constant__ GlobalConstants cuTspParam;
/* rounding function, but at .5 rounds to the lower int. Due to the TSPLIB
* standard library.
*/
__device__ __host__ __inline__ int nint(float x)
{
// Truncation of (x + 0.5): round-half-up behavior for the non-negative
// inputs produced by euclideanDistance. NOTE(review): for negative x this
// would truncate toward zero rather than round to nearest — no caller in
// this file passes negative values.
return (int) (x + 0.5);
}
/* Randomisation is done by a simple linear congruential generator.
* We use A and C values as done by glibc.
*/
__device__ __inline__ unsigned int rand(unsigned int *x)
{
    /* Advance the caller-owned LCG state in place and return the new
     * value. The mask keeps the state within 31 bits. */
    unsigned int next = LCG_A * (*x) + LCG_C;
    next &= 0x7fffffff;
    *x = next;
    return next;
}
__device__ __inline__ float randomFloat(unsigned int *x)
{
    /* Roughly uniform float in [0, 1]: next LCG draw scaled by the
     * modulus. The masked state can reach LCG_M + 1, so the ratio may
     * marginally exceed 1. */
    float draw = (float) rand(x);
    return draw / (float) LCG_M;
}
__device__ __inline__ double randomDouble(unsigned int *x)
{
    /* Double-precision variant of randomFloat: next LCG draw divided
     * by the modulus. */
    double draw = (double) rand(x);
    return draw / (double) LCG_M;
}
__device__ __inline__ unsigned int randomInt(unsigned int *x, unsigned int max)
{
    /* Uniform integer in [0, max) via modulo reduction (slight modulo
     * bias; max must be non-zero). */
    unsigned int draw = rand(x);
    return draw % max;
}
__device__ __inline__ bool randomBool(unsigned int *x)
{
    /* Random boolean taken from bit 7 of an 8-bit draw. */
    return ((randomInt(x, 256) >> 7) & 0x00000001) != 0;
}
__device__ __host__ __inline__ int euclideanDistance(struct city *a, struct city *b)
{
// Coordinate deltas are narrowed from double to float before squaring;
// the result is rounded to int via nint, matching the TSPLIB-style
// rounded Euclidean metric.
float dx = b->x - a->x;
float dy = b->y - a->y;
return nint((sqrt(dx * dx + dy * dy)));
}
/* Calcuates the delta of the costs given by a new order using reverse
*/
__device__ __inline__ int reverseCost(struct city *cities, int *order, int *n)
{
    /* Cost delta of reversing the segment: new edges (n0,n3) and (n1,n2)
     * replace old edges (n0,n2) and (n1,n3). Negative means improvement. */
    int added = euclideanDistance(&cities[order[n[0]]], &cities[order[n[3]]])
              + euclideanDistance(&cities[order[n[1]]], &cities[order[n[2]]]);
    int removed = euclideanDistance(&cities[order[n[0]]], &cities[order[n[2]]])
                + euclideanDistance(&cities[order[n[1]]], &cities[order[n[3]]]);
    return added - removed;
}
/* The order of the city is changed by swapping the
* order between n[0] and n[1].
* The swapping is done beginning from the outer end
* going into the middle
*/
__device__ __inline__ void reverse(int *order, int *n)
{
int CITY_N = cuTspParam.CITY_N;
int swaps = (1 + ((n[1] - n[0] + CITY_N) % CITY_N)) / 2; // this many elements have to be swapped to have a complete reversal
// Swap pairs from both ends of the segment toward its middle; indices
// wrap modulo CITY_N because the tour is circular.
for (int j = 0; j < swaps; ++j) {
int k = (n[0] + j) % CITY_N;
int l = (n[1] - j + CITY_N) % CITY_N;
int tmp = order[k];
order[k] = order[l];
order[l] = tmp;
}
}
/* Calculates the delta of the costs of the city order if
* the transportation of this segments (given by n) are actually
* done.
*/
__device__ __inline__ int transportCost(struct city *cities, int *order, int *n)
{
    /* Cost delta of moving segment [n0..n1] between n2 and n3:
     * edges (n0,n2), (n1,n3), (n4,n5) are created while
     * (n1,n5), (n0,n4), (n2,n3) disappear. Negative means improvement. */
    int added = euclideanDistance(&cities[order[n[0]]], &cities[order[n[2]]])
              + euclideanDistance(&cities[order[n[1]]], &cities[order[n[3]]])
              + euclideanDistance(&cities[order[n[4]]], &cities[order[n[5]]]);
    int removed = euclideanDistance(&cities[order[n[1]]], &cities[order[n[5]]])
                + euclideanDistance(&cities[order[n[0]]], &cities[order[n[4]]])
                + euclideanDistance(&cities[order[n[2]]], &cities[order[n[3]]]);
    return added - removed;
}
/* Transport the path segment (consisting of the start n[0] and end at n[1]
* to the path given by n[2] and n[3], which are adjacent and the segment is
* to be placed in between. n[4] is the city preceding n[0] and n[5] succeeds
* n[1].
* Transportation should only be done if the metroplis algorithm agrees.
*
*/
__device__ void transport(int *order, int *n)
{
int CITY_N = cuTspParam.CITY_N;
// The second half of the order buffer (order[CITY_N .. 2*CITY_N)) is used
// as scratch; the host allocates 2 * CITY_N ints per permutation for this.
int *newOrder = &order[CITY_N];
// Lengths (minus one) of the three arcs being stitched together.
int m1 = (n[1] - n[0] + CITY_N) % CITY_N;
int m2 = (n[4] - n[3] + CITY_N) % CITY_N;
int m3 = (n[2] - n[5] + CITY_N) % CITY_N;
int i = 0;
// Rebuild the tour in scratch: the moved segment [n0..n1] first, then
// the two remaining arcs, all indices wrapping modulo CITY_N.
for (int j = 0; j <= m1; ++j) {
newOrder[i++] = order[(j + n[0]) % CITY_N];
}
for (int j = 0; j <= m2; ++j) {
newOrder[i++] = order[(j + n[3]) % CITY_N];
}
for (int j = 0; j <= m3; ++j) {
newOrder[i++] = order[(j + n[5]) % CITY_N];
}
// Copy the rebuilt tour back over the live half of the buffer.
for (int j = 0; j < CITY_N; ++j) {
order[j] = newOrder[j];
}
}
/* Metroplis algorithm: Always take the downhill path and
* sometime take the uphill path to avoid local minima
*/
__device__ __inline__ bool metropolis(const int cost, const double t, unsigned int *x)
{
    /* Always accept an improving move; the RNG is only consumed for
     * non-improving moves (mirrors the original short-circuit ||), which
     * are accepted with probability exp(-BOLTZMANN_COEFF * cost / t). */
    if (cost < 0)
        return true;
    return randomDouble(x) < exp((double) (BOLTZMANN_COEFF * -cost / t));
}
__host__ __inline__ void copy_permutation(struct permutation* dest, const struct permutation* src) {
    /* Host-side deep copy: cost, nSucc and the first CITY_N entries of
     * the order buffer. dest->order must already point at a buffer of
     * at least CITY_N ints. */
    dest->cost = src->cost;
    dest->nSucc = src->nSucc;
    int idx = 0;
    while (idx < CITY_N) {
        dest->order[idx] = src->order[idx];
        ++idx;
    }
}
/* Main kernel function */
/* One thread anneals one candidate permutation at temperature t: it
 * repeatedly proposes either a segment reversal or a segment transport
 * and accepts/rejects via the Metropolis rule.
 * Launch: 1-D grid with exactly one thread per entry of `permutations`
 * (the host launches THREADS threads and there is no id < THREADS guard).
 * Each permutation's order buffer must hold 2*CITY_N ints, because
 * transport() uses the upper half as scratch. */
__global__ void solve( struct permutation *permutations, const float t)
{
    struct city *cities = cuTspParam.cities;
    int CITY_N = cuTspParam.CITY_N;
    int notSeg; // number of cities not on the segment
    int maxChangeTries = MAX_TRIES * CITY_N;
    int succLimit = N_LIMIT * CITY_N;
    int dCost;
    bool ans;
    int n[6];
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    struct permutation *perm = &(permutations[id]);
    /* BUGFIX: use this thread's own RNG state. The host initializes one
     * LCG seed per thread, but the original code pointed every thread at
     * randSeeds[0], causing a data race and fully correlated streams. */
    unsigned int *x = &cuTspParam.randSeeds[id];
    perm->nSucc = 0;
    for (int j = 0; j < maxChangeTries; ++j) {
        /* Pick a random segment [n0, n1] that leaves at least two cities
         * outside of it. */
        do {
            n[0] = randomInt(x, CITY_N);
            n[1] = randomInt(x, CITY_N - 1);
            if (n[1] >= n[0])
                ++n[1];
            notSeg = (n[0] - n[1] + CITY_N - 1) % CITY_N;
        } while (notSeg < 2);
        /* It is randomly choosen whether a transportation or a reversion is done */
        if (randomBool(x)) {
            n[2] = (n[1] + randomInt(x, abs(notSeg - 1)) + 1) % CITY_N;
            n[3] = (n[2] + 1) % CITY_N;
            n[4] = (n[0] + CITY_N- 1) % CITY_N;
            n[5] = (n[1] + 1) % CITY_N;
            dCost = transportCost(cities, perm->order, n);
            ans = metropolis(dCost, t, x);
            if (ans) {
                ++perm->nSucc;
                perm->cost += dCost;
                transport(perm->order, n);
            }
        } else {
            n[2] = (n[0] + CITY_N - 1) % CITY_N;
            n[3] = (n[1] + 1) % CITY_N;
            dCost = reverseCost(cities, perm->order, n);
            ans = metropolis(dCost, t, x);
            if (ans) {
                ++perm->nSucc;
                perm->cost += dCost;
                reverse(perm->order, n);
            }
        }
        /* Finish early if there are enough successful changes */
        if (perm->nSucc > succLimit)
            break;
    }
}
class Anneal {
private:
    /* Calculates the length of the initial path, which is already given.
     * This is in O(n): sums the rounded Euclidean length of every
     * consecutive edge plus the closing edge back to the first city. */
    void initialPath(struct permutation *perm, struct city *cities)
    {
        int i, i1, i2;
        perm->cost= 0;
        for (i = 0; i < CITY_N - 1; i++) {
            i1 = perm->order[i];
            i2 = perm->order[i+1];
            perm->cost += euclideanDistance(&cities[i1], &cities[i2]);
        }
        i1 = perm->order[CITY_N - 1];
        i2 = perm->order[0];
        perm->cost += euclideanDistance(&cities[i1], &cities[i2]);
        cout << "Initial path length: " << perm->cost << endl;
    }
    /* Prints a permutation's cost and accepted-move count; the full city
     * order is printed only when showOrder is true. */
    void printInformation(struct permutation *currPerm, bool showOrder = true)
    {
        cout << "Path Length = " << currPerm->cost << endl;
        cout << "Successful Moves: " << currPerm->nSucc << endl;
        if (showOrder) {
            cout << "Order: ";
            for (int j = 0; j < CITY_N; j++) {
                cout << currPerm->order[j] << " ";
            }
        }
        cout << endl;
    }
public:
    double runtime;   // wall-clock runtime of the last order() call, in ms
    int resultCost;   // best tour length found by the last order() call
    Anneal() {}
    /* Runs GPU simulated annealing on the global TSP instance, starting
     * from the tour in `order` (CITY_N city indices). THREADS independent
     * chains run per temperature step; the best tour found is kept in
     * resultCost / runtime. */
    void order(struct city *cities, int *order)
    {
        double t = TEMP_START;
        long seed = (long) (time(NULL));
        cudaError_t cudaStat;
        struct permutation* dPermutation;
        struct permutation* hPermutation = (struct permutation *) malloc(THREADS * sizeof(struct permutation));
        /* BUGFIX: allocate one host-side result buffer per thread. The
         * original loop ran only to CITY_N, leaving hPermutation[i].order
         * uninitialized (and later dereferenced) whenever CITY_N < THREADS. */
        for (int i = 0; i < THREADS; i++) {
            hPermutation[i].order = new int [CITY_N];
        }
        struct city *dCities;
        unsigned int *LCGX = (unsigned int *) malloc(THREADS * sizeof(unsigned int));
        unsigned int *dLCGX;
        struct permutation *currPerm = (struct permutation *) malloc(sizeof(struct permutation));
        currPerm->order = new int [CITY_N];
        /* currPerm is re-pointed into hPermutation later on; remember the
         * original allocation so it can be released at the end. */
        struct permutation *initialPerm = currPerm;
        struct permutation *allMinPerm= (struct permutation *) malloc(sizeof(struct permutation));
        allMinPerm->order = new int [CITY_N];
        int oldCost = 2147483647;
        int repeatCost = 0;
        clock_t startAll, endAll; // timer to measure the overall run time
        double runtimeAll;
        clock_t startCuda, endCuda; //timer to measure the run time of cuda
        double cudaRuntime = 0.0f;
        startAll = clock();
        //initialize RNG
        srand(seed);
        //initialize seeds for RNG on GPU (one LCG state per thread)
        for (int i = 0; i < THREADS; ++i) {
            LCGX[i] = rand();
        }
        // Kernel invocation
        int threadsPerBlock = 256;
        int blocksPerGrid = (THREADS + threadsPerBlock - 1) / threadsPerBlock;
        cout << "Threads: " << THREADS << ", Blocks: " << blocksPerGrid << endl;
        for (int i = 0; i < CITY_N; i++) {
            currPerm->order[i] = order[i];
        }
        initialPath(currPerm, cities);
        copy_permutation(allMinPerm, currPerm);
        //allocate and copy #threads permutations on the device
        cudaStat = cudaMalloc(&dPermutation, THREADS * sizeof(struct permutation));
        if (cudaStat != cudaSuccess) {
            cout << "couldn't allocate memory on the device. Exit." << endl;
            return;
        }
        /* Give every device permutation its own order buffer of 2*CITY_N
         * ints (the upper half is scratch for transport()). Only the
         * address &dPermutation[i].order is computed on the host; the
         * device struct itself is written via cudaMemcpy. */
        for (int i = 0; i < THREADS; i++) {
            int* order;
            cudaStat = cudaMalloc(&order, 2 * CITY_N * sizeof(int));
            if (cudaStat != cudaSuccess) {
                cout << "couldn't allocate memory on the device. Exit." << endl;
                return;
            }
            cudaStat = cudaMemcpy(order, currPerm->order, CITY_N * sizeof(int), cudaMemcpyHostToDevice);
            if (cudaStat != cudaSuccess) {
                cout << "couldn't allocate memory on the device. Exit." << endl;
                return;
            }
            cudaStat = cudaMemcpy(&dPermutation[i].order, &order, sizeof(int*), cudaMemcpyHostToDevice);
            if (cudaStat != cudaSuccess) {
                cout << "couldn't allocate memory on the device. Exit." << endl;
                return;
            }
        }
        cout<<"After the first call\n";
        cudaStat = cudaMalloc(&dCities, CITY_N * sizeof(struct city));
        if (cudaStat != cudaSuccess) {
            cout << "couldn't allocate memory on the device. Exit." << endl;
            return;
        }
        cudaStat = cudaMemcpy(dCities, cities, CITY_N * sizeof(struct city), cudaMemcpyHostToDevice);
        if (cudaStat != cudaSuccess) {
            cout << "couldn't copy memory to global memory. Exit." << endl;
            return;
        }
        cudaStat = cudaMalloc(&dLCGX, THREADS * sizeof(unsigned int));
        if (cudaStat != cudaSuccess) {
            cout << "couldn't allocate memory on the device. Exit." << endl;
            return;
        }
        cudaStat = cudaMemcpy(dLCGX, LCGX, THREADS * sizeof(unsigned int), cudaMemcpyHostToDevice);
        if (cudaStat != cudaSuccess) {
            cout << "couldn't copy memory to global memory. Exit." << endl;
            return;
        }
        cout<<"Just after the allocation\n";
        // Publish the solver parameters to constant memory.
        GlobalConstants params;
        params.cities = dCities;
        params.randSeeds = dLCGX;
        params.CITY_N = CITY_N;
        cudaMemcpyToSymbol(cuTspParam, &params, sizeof(GlobalConstants));
        cout<<"Just before the for loop\n";
        /* Try up to MAX_TEMP_STEPS temperature steps. It could stop before if no kernel
         * showed any succesful change or if the solution did not change 5 times
         */
        for (int i = 0; i < MAX_TEMP_STEPS; ++i) {
            cudaDeviceSynchronize(); // was cudaThreadSynchronize (deprecated)
            startCuda = clock();
            cout<<"This is the "<<i<<" th loop\n";
            //Copies the initial permutation's cost to each result permutation
            //(the device order buffers keep their per-thread state)
            for (int k = 0; k < THREADS; k++) {
                cudaStat = cudaMemcpy(&dPermutation[k].cost, &currPerm->cost, sizeof(int), cudaMemcpyHostToDevice);
                if (cudaStat != cudaSuccess) {
                    cout << "couldn't copy memory to global memory. Exit." << endl;
                    return;
                }
            }
            cout<<"Just before the kernel call\n";
            //invoke cuda
            solve<<<blocksPerGrid, threadsPerBlock>>>(dPermutation, t);
            cudaStat = cudaDeviceSynchronize();
            if (cudaStat != cudaSuccess) {
                cout << "something went wrong during device execution. Exit." << endl;
                return;
            }
            endCuda = clock();
            cudaRuntime += (endCuda - startCuda) * 1000 / CLOCKS_PER_SEC;
            for (int k = 0; k < THREADS; k++) {
                /* BUGFIX: dPermutation[k].order lives in device memory and
                 * must not be dereferenced on the host; fetch the device
                 * pointer first, then copy the order buffer through it. */
                int* dOrder;
                cudaStat = cudaMemcpy(&dOrder, &dPermutation[k].order, sizeof(int*), cudaMemcpyDeviceToHost);
                if (cudaStat != cudaSuccess) {
                    cout << "couldn't copy memory from global memory. Exit." << endl;
                    return;
                }
                cudaStat = cudaMemcpy(hPermutation[k].order, dOrder, CITY_N * sizeof(int), cudaMemcpyDeviceToHost);
                if (cudaStat != cudaSuccess) {
                    cout << "couldn't copy memory from global memory. Exit." << endl;
                    return;
                }
                cudaStat = cudaMemcpy(&hPermutation[k].cost, &dPermutation[k].cost, sizeof(int), cudaMemcpyDeviceToHost);
                if (cudaStat != cudaSuccess) {
                    cout << "couldn't copy memory from global memory. Exit." << endl;
                    return;
                }
                cudaStat = cudaMemcpy(&hPermutation[k].nSucc, &dPermutation[k].nSucc, sizeof(int), cudaMemcpyDeviceToHost);
                if (cudaStat != cudaSuccess) {
                    cout << "couldn't copy memory from global memory. Exit." << endl;
                    return;
                }
            }
            /* Loops through all resulting permutations and store the one with minimal length but
             * at least one swap.
             * If all threads didn't swap, exit the program.
             * Takes O(n) time.
             */
            int minCost = 2147483647;
            bool swap = false;
            for (int j = 0; j < THREADS; ++j) {
                if (minCost >= hPermutation[j].cost && hPermutation[j].nSucc != 0) {
                    currPerm = &(hPermutation[j]);
                    minCost = currPerm->cost;
                    swap = true;
                    if (minCost < allMinPerm->cost)
                        copy_permutation(allMinPerm, currPerm);
                }
            }
            if (!swap) {
                cout << "No swaps occured. Exit" << endl;
                break;
            }
            if (oldCost == minCost) {
                if (++repeatCost == 5) {
                    cout << "Cost did not change 5 times in a row. Exit" << endl;
                    break;
                }
            } else
                repeatCost = 0;
            cout << endl << "T = " << t << endl;
            printInformation(currPerm, false);
            oldCost = minCost;
            t *= COOLING;
        }
        endAll = clock();
        runtimeAll = (endAll - startAll) / (1.0f * CLOCKS_PER_SEC) * 1000;
        cout << endl << "Final Result:" << endl;
        cout << "=============" << endl;
        printInformation(allMinPerm);
        runtime = runtimeAll;
        resultCost = allMinPerm->cost;
        printf("\nThe program needed an overall time of %.2lf ms.\n", runtimeAll);
        printf("%.2lf ms were spent at the CUDA part.\n", cudaRuntime);
        printf("So %.2lf ms were spent at the host.", runtimeAll - cudaRuntime);
        /* NOTE: the per-thread order buffers allocated on the device are
         * only reachable through device memory and are not freed here
         * (as in the original code); the early-return error paths also
         * leak — left unchanged to keep the error behavior identical. */
        cudaFree(dPermutation);
        cudaFree(dCities);
        cudaFree(dLCGX);
        // BUGFIX: release host-side buffers that previously leaked.
        for (int i = 0; i < THREADS; i++) {
            delete [] hPermutation[i].order;
        }
        delete [] allMinPerm->order;
        delete [] initialPerm->order;
        free(initialPerm);
        free(allMinPerm);
        free(LCGX);
        free(hPermutation);
    }
};
/* Parses a TSPLIB-style input file: header line 4 ("... : <n>") carries
 * the city count; after the 6-line header every line is "<id> <x> <y>".
 * Fills the globals CITY_N and cities.
 * BUGFIX: the original dereferenced fopen()'s result unchecked, never
 * closed the file, and did not check fgets/strtok results. */
void readFile(char* FILENAME)
{
    FILE *fp;
    char line[80];
    int i = 0;
    fp = fopen(FILENAME, "rt");
    if (fp == NULL) {
        fprintf(stderr, "Cannot open input file %s\n", FILENAME);
        exit(-1);
    }
    for (int h = 0; h < 6; h++) { // renamed from i: no longer shadows the outer counter
        if (fgets(line, 80, fp) == NULL) {
            fprintf(stderr, "Unexpected end of header in %s\n", FILENAME);
            fclose(fp);
            exit(-1);
        }
        if (h == 3) { // dimension line, e.g. "DIMENSION : <n>"
            char* pch = strtok(line, ":");
            if (pch != NULL)
                pch = strtok(NULL, "\t\n");
            if (pch == NULL) {
                fprintf(stderr, "Malformed dimension line in %s\n", FILENAME);
                fclose(fp);
                exit(-1);
            }
            CITY_N = atoi(pch);
        }
    }
    cout<<"Number of cities is "<<CITY_N<<endl;
    cities = (struct city *) malloc (CITY_N * sizeof(struct city));
    while (fgets(line, 80, fp) != NULL && i < CITY_N) {
        sscanf(line, "%*d %lf %lf", &(cities[i].x), &(cities[i].y));
        ++i;
    }
    fclose(fp); // BUGFIX: file handle previously leaked
}
/* Dumps every city's index and coordinates, one per line. */
void printCities(struct city *cities)
{
    cout << "Cities: " << endl;
    for (int idx = 0; idx < CITY_N; ++idx) {
        cout << idx << ". x: " << cities[idx].x << " y: " << cities[idx].y << endl;
    }
}
/* Entry point: reads the TSP instance named on the command line, runs
 * the annealing solver NUMBER_RUNS times from the identity tour, and
 * prints average cost and runtime. */
int main(int argc, char* argv[])
{
    if (argc < 2) {
        printf("Usage: ./tsp inputFiles\n");
        exit(-1);
    }
    readFile(argv[1]);
    int *order = (int *) malloc(CITY_N * sizeof(int));
    float avgResult = 0.0f;
    double avgRuntime = 0.0f;
    for (int runs = 0; runs < NUMBER_RUNS; ++runs) {
        for (int i = 0; i < CITY_N; ++i)
            order[i] = i;
        //printCities(cities);
        /* BUGFIX: the solver was heap-allocated with new and never
         * deleted, leaking one Anneal per run; a stack object suffices. */
        Anneal a;
        a.order(cities, order);
        avgResult += a.resultCost / (NUMBER_RUNS * 1.0f);
        avgRuntime += a.runtime / (NUMBER_RUNS * 1.0f);
    }
    cout << endl << endl;
    cout << "Average Costs: " << avgResult << endl;
    cout << "Average Runtime: " << avgRuntime << endl;
    // Release the order buffer and the city array allocated by readFile.
    free(order);
    free(cities);
    return 0;
}
#include <math.h>
#include <time.h>
#include <iostream>
#include <hip/hip_runtime.h>
#define NUMBER_RUNS 1
#define LCG_A 1103515245
#define LCG_C 12345
#define LCG_M 2147483646
#define MAX_TRIES 1000
#define N_LIMIT 20
#define MAX_TEMP_STEPS 100
#define TEMP_START 0.5
#define COOLING 0.99
#define THREADS 1024
#define BOLTZMANN_COEFF 0.01
using namespace std;
// A city is a point in the plane; tour edges between cities use the
// rounded Euclidean metric (see euclideanDistance / nint below).
struct city {
double x;
double y;
};
// One candidate tour: total rounded edge length (cost), the visiting
// order (indices into the city array), and the number of accepted
// annealing moves performed on it (nSucc).
struct permutation {
int cost;
int* order;
int nSucc;
};
// Read-only solver parameters mirrored into GPU constant memory:
// problem size, device-side city array, and the RNG seed array
// (sized THREADS on the host side).
struct GlobalConstants {
int CITY_N;
city* cities;
unsigned int* randSeeds;
};
//global variables
struct city *cities;
int CITY_N;
//global variables on GPU
__constant__ GlobalConstants cuTspParam;
/* rounding function, but at .5 rounds to the lower int. Due to the TSPLIB
* standard library.
*/
__device__ __host__ __inline__ int nint(float x)
{
// Truncation of (x + 0.5): round-half-up for the non-negative inputs
// produced by euclideanDistance (negative x would truncate toward zero).
return (int) (x + 0.5);
}
/* Randomisation is done by a simple linear congruential generator.
* We use A and C values as done by glibc.
*/
__device__ __inline__ unsigned int rand(unsigned int *x)
{
// Advance the caller-owned LCG state in place; the mask keeps the
// state within 31 bits.
*x = ((LCG_A * (*x)) + LCG_C) & 0x7fffffff;
return *x;
}
// Roughly uniform float in [0, 1]; the masked state can reach
// LCG_M + 1, so the ratio may marginally exceed 1.
__device__ __inline__ float randomFloat(unsigned int *x)
{
return (float) (rand(x) / (float) LCG_M);
}
// Double-precision variant of randomFloat.
__device__ __inline__ double randomDouble(unsigned int *x)
{
return (double) (rand(x) / (double) LCG_M);
}
// Uniform integer in [0, max) via modulo reduction (slight modulo bias;
// max must be non-zero).
__device__ __inline__ unsigned int randomInt(unsigned int *x, unsigned int max)
{
return rand(x) % max;
}
// Random boolean taken from bit 7 of an 8-bit draw.
__device__ __inline__ bool randomBool(unsigned int *x)
{
if ((randomInt(x, 256) >> 7) & 0x00000001)
return true;
else
return false;
}
// Euclidean distance between two cities: deltas are narrowed from
// double to float before squaring; the result is rounded via nint.
__device__ __host__ __inline__ int euclideanDistance(struct city *a, struct city *b)
{
float dx = b->x - a->x;
float dy = b->y - a->y;
return nint((sqrt(dx * dx + dy * dy)));
}
/* Calculates the delta of the costs given by a new order using reverse
*/
__device__ __inline__ int reverseCost(struct city *cities, int *order, int *n)
{
int cost;
// New edges (n0,n3) and (n1,n2) replace old edges (n0,n2) and (n1,n3);
// a negative delta means the reversal shortens the tour.
cost = -euclideanDistance(&cities[order[n[0]]], &cities[order[n[2]]]);
cost -= euclideanDistance(&cities[order[n[1]]], &cities[order[n[3]]]);
cost += euclideanDistance(&cities[order[n[0]]], &cities[order[n[3]]]);
cost += euclideanDistance(&cities[order[n[1]]], &cities[order[n[2]]]);
return cost;
}
/* The order of the city is changed by swapping the
* order between n[0] and n[1].
* The swapping is done beginning from the outer end
* going into the middle
*/
__device__ __inline__ void reverse(int *order, int *n)
{
int CITY_N = cuTspParam.CITY_N;
int swaps = (1 + ((n[1] - n[0] + CITY_N) % CITY_N)) / 2; // this many elements have to be swapped to have a complete reversal
// Indices wrap modulo CITY_N because the tour is circular.
for (int j = 0; j < swaps; ++j) {
int k = (n[0] + j) % CITY_N;
int l = (n[1] - j + CITY_N) % CITY_N;
int tmp = order[k];
order[k] = order[l];
order[l] = tmp;
}
}
/* Calculates the delta of the costs of the city order if
* the transportation of this segments (given by n) are actually
* done.
*/
__device__ __inline__ int transportCost(struct city *cities, int *order, int *n)
{
int cost;
// Edges (n1,n5), (n0,n4), (n2,n3) disappear; edges (n0,n2), (n1,n3),
// (n4,n5) are created. Negative delta means improvement.
cost = -euclideanDistance(&cities[order[n[1]]], &cities[order[n[5]]]);
cost -= euclideanDistance(&cities[order[n[0]]], &cities[order[n[4]]]);
cost -= euclideanDistance(&cities[order[n[2]]], &cities[order[n[3]]]);
cost += euclideanDistance(&cities[order[n[0]]], &cities[order[n[2]]]);
cost += euclideanDistance(&cities[order[n[1]]], &cities[order[n[3]]]);
cost += euclideanDistance(&cities[order[n[4]]], &cities[order[n[5]]]);
return cost;
}
/* Transport the path segment (consisting of the start n[0] and end at n[1]
* to the path given by n[2] and n[3], which are adjacent and the segment is
* to be placed in between. n[4] is the city preceding n[0] and n[5] succeeds
* n[1].
* Transportation should only be done if the metroplis algorithm agrees.
*
*/
__device__ void transport(int *order, int *n)
{
int CITY_N = cuTspParam.CITY_N;
// The second half of the order buffer (order[CITY_N .. 2*CITY_N)) is used
// as scratch; the host allocates 2 * CITY_N ints per permutation.
int *newOrder = &order[CITY_N];
// Lengths (minus one) of the three arcs being stitched together.
int m1 = (n[1] - n[0] + CITY_N) % CITY_N;
int m2 = (n[4] - n[3] + CITY_N) % CITY_N;
int m3 = (n[2] - n[5] + CITY_N) % CITY_N;
int i = 0;
// Rebuild the tour in scratch: moved segment first, then the two
// remaining arcs, all indices wrapping modulo CITY_N.
for (int j = 0; j <= m1; ++j) {
newOrder[i++] = order[(j + n[0]) % CITY_N];
}
for (int j = 0; j <= m2; ++j) {
newOrder[i++] = order[(j + n[3]) % CITY_N];
}
for (int j = 0; j <= m3; ++j) {
newOrder[i++] = order[(j + n[5]) % CITY_N];
}
// Copy the rebuilt tour back over the live half of the buffer.
for (int j = 0; j < CITY_N; ++j) {
order[j] = newOrder[j];
}
}
/* Metropolis algorithm: Always take the downhill path and
* sometimes take the uphill path to avoid local minima
*/
__device__ __inline__ bool metropolis(const int cost, const double t, unsigned int *x)
{
// Improving moves (cost < 0) are always accepted; the RNG is only
// consumed for non-improving moves thanks to the short-circuit ||.
return cost < 0 || randomDouble(x) < exp((double) (BOLTZMANN_COEFF * -cost / t));
}
// Host-side deep copy of a permutation: cost, nSucc and the first
// CITY_N entries of the order buffer. dest->order must already point
// at a buffer of at least CITY_N ints.
__host__ __inline__ void copy_permutation(struct permutation* dest, const struct permutation* src) {
dest->cost = src->cost;
dest->nSucc = src->nSucc;
for (int i = 0; i < CITY_N; ++i) {
dest->order[i] = src->order[i];
}
}
/* Main kernel function */
/* One thread anneals one candidate permutation at temperature t: it
 * repeatedly proposes either a segment reversal or a segment transport
 * and accepts/rejects via the Metropolis rule.
 * Launch: 1-D grid with exactly one thread per entry of `permutations`
 * (the host launches THREADS threads and there is no id < THREADS guard).
 * Each permutation's order buffer must hold 2*CITY_N ints, because
 * transport() uses the upper half as scratch. */
__global__ void solve( struct permutation *permutations, const float t)
{
    struct city *cities = cuTspParam.cities;
    int CITY_N = cuTspParam.CITY_N;
    int notSeg; // number of cities not on the segment
    int maxChangeTries = MAX_TRIES * CITY_N;
    int succLimit = N_LIMIT * CITY_N;
    int dCost;
    bool ans;
    int n[6];
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    struct permutation *perm = &(permutations[id]);
    /* BUGFIX: use this thread's own RNG state. The host initializes one
     * LCG seed per thread, but the original code pointed every thread at
     * randSeeds[0], causing a data race and fully correlated streams. */
    unsigned int *x = &cuTspParam.randSeeds[id];
    perm->nSucc = 0;
    for (int j = 0; j < maxChangeTries; ++j) {
        /* Pick a random segment [n0, n1] that leaves at least two cities
         * outside of it. */
        do {
            n[0] = randomInt(x, CITY_N);
            n[1] = randomInt(x, CITY_N - 1);
            if (n[1] >= n[0])
                ++n[1];
            notSeg = (n[0] - n[1] + CITY_N - 1) % CITY_N;
        } while (notSeg < 2);
        /* It is randomly choosen whether a transportation or a reversion is done */
        if (randomBool(x)) {
            n[2] = (n[1] + randomInt(x, abs(notSeg - 1)) + 1) % CITY_N;
            n[3] = (n[2] + 1) % CITY_N;
            n[4] = (n[0] + CITY_N- 1) % CITY_N;
            n[5] = (n[1] + 1) % CITY_N;
            dCost = transportCost(cities, perm->order, n);
            ans = metropolis(dCost, t, x);
            if (ans) {
                ++perm->nSucc;
                perm->cost += dCost;
                transport(perm->order, n);
            }
        } else {
            n[2] = (n[0] + CITY_N - 1) % CITY_N;
            n[3] = (n[1] + 1) % CITY_N;
            dCost = reverseCost(cities, perm->order, n);
            ans = metropolis(dCost, t, x);
            if (ans) {
                ++perm->nSucc;
                perm->cost += dCost;
                reverse(perm->order, n);
            }
        }
        /* Finish early if there are enough successful changes */
        if (perm->nSucc > succLimit)
            break;
    }
}
class Anneal {
private:
/* Calculates the length of the initial path, which is already given.
* This is in O(n): sums the rounded Euclidean length of every
* consecutive edge plus the closing edge back to the first city.
*/
void initialPath(struct permutation *perm, struct city *cities)
{
int i, i1, i2;
perm->cost= 0;
for (i = 0; i < CITY_N - 1; i++) {
i1 = perm->order[i];
i2 = perm->order[i+1];
perm->cost += euclideanDistance(&cities[i1], &cities[i2]);
}
// close the tour: last city back to the first
i1 = perm->order[CITY_N - 1];
i2 = perm->order[0];
perm->cost += euclideanDistance(&cities[i1], &cities[i2]);
cout << "Initial path length: " << perm->cost << endl;
}
// Prints a permutation's cost and accepted-move count; the full city
// order is printed only when showOrder is true.
void printInformation(struct permutation *currPerm, bool showOrder = true)
{
cout << "Path Length = " << currPerm->cost << endl;
cout << "Successful Moves: " << currPerm->nSucc << endl;
if (showOrder) {
cout << "Order: ";
for (int j = 0; j < CITY_N; j++) {
cout << currPerm->order[j] << " ";
}
}
cout << endl;
}
public:
double runtime;
int resultCost;
Anneal() {}
void order(struct city *cities, int *order)
{
double t = TEMP_START;
long seed = (long) (time(NULL));
hipError_t cudaStat;
struct permutation* dPermutation;
struct permutation* hPermutation = (struct permutation *) malloc(THREADS * sizeof(struct permutation));
for (int i = 0; i < CITY_N; i++) {
hPermutation[i].order = new int [CITY_N];
}
struct city *dCities;
unsigned int *LCGX = (unsigned int *) malloc(THREADS * sizeof(unsigned int));
unsigned int *dLCGX;
struct permutation *currPerm = (struct permutation *) malloc(sizeof(struct permutation));
currPerm->order = new int [CITY_N];
struct permutation *allMinPerm= (struct permutation *) malloc(sizeof(struct permutation));
allMinPerm->order = new int [CITY_N];
int oldCost = 2147483647;
int repeatCost = 0;
clock_t startAll, endAll; // timer to measure the overall run time
double runtimeAll;
clock_t startCuda, endCuda; //timer to measure the run time of cuda
double cudaRuntime = 0.0f;
startAll = clock();
//initialize RNG
srand(seed);
//initialize seeds for RNG on GPU
for (int i = 0; i < THREADS; ++i) {
LCGX[i] = rand();
}
// Kernel invocation
int threadsPerBlock = 256;
int blocksPerGrid = (THREADS + threadsPerBlock - 1) / threadsPerBlock;
cout << "Threads: " << THREADS << ", Blocks: " << blocksPerGrid << endl;
for (int i = 0; i < CITY_N; i++) {
currPerm->order[i] = order[i];
}
initialPath(currPerm, cities);
copy_permutation(allMinPerm, currPerm);
//allocate and copy #threads permutations on the device
cudaStat = hipMalloc(&dPermutation, THREADS * sizeof(struct permutation));
if (cudaStat != hipSuccess) {
cout << "couldn't allocate memory on the device. Exit." << endl;
return;
}
for (int i = 0; i < THREADS; i++) {
int* order;
cudaStat = hipMalloc(&order, 2 * CITY_N * sizeof(int));
if (cudaStat != hipSuccess) {
cout << "couldn't allocate memory on the device. Exit." << endl;
return;
}
cudaStat = hipMemcpy(order, currPerm->order, CITY_N * sizeof(int), hipMemcpyHostToDevice);
if (cudaStat != hipSuccess) {
cout << "couldn't allocate memory on the device. Exit." << endl;
return;
}
cudaStat = hipMemcpy(&dPermutation[i].order, &order, sizeof(int*), hipMemcpyHostToDevice);
if (cudaStat != hipSuccess) {
cout << "couldn't allocate memory on the device. Exit." << endl;
return;
}
}
cout<<"After the first call\n";
cudaStat = hipMalloc(&dCities, CITY_N * sizeof(struct city));
if (cudaStat != hipSuccess) {
cout << "couldn't allocate memory on the device. Exit." << endl;
return;
}
cudaStat = hipMemcpy(dCities, cities, CITY_N * sizeof(struct city), hipMemcpyHostToDevice);
if (cudaStat != hipSuccess) {
cout << "couldn't copy memory to global memory. Exit." << endl;
return;
}
cudaStat = hipMalloc(&dLCGX, THREADS * sizeof(unsigned int));
if (cudaStat != hipSuccess) {
cout << "couldn't allocate memory on the device. Exit." << endl;
return;
}
cudaStat = hipMemcpy(dLCGX, LCGX, THREADS * sizeof(unsigned int), hipMemcpyHostToDevice);
if (cudaStat != hipSuccess) {
cout << "couldn't copy memory to global memory. Exit." << endl;
return;
}
cout<<"Just after the allocation\n";
GlobalConstants params;
params.cities = dCities;
params.randSeeds = dLCGX;
params.CITY_N = CITY_N;
hipMemcpyToSymbol(HIP_SYMBOL(cuTspParam), ¶ms, sizeof(GlobalConstants));
cout<<"Just before the for loop\n";
/* Try up to MAX_TEMP_STEPS temperature steps. It could stop before if no kernel
* showed any succesful change or if the solution did not change 5 times
*/
for (int i = 0; i < MAX_TEMP_STEPS; ++i) {
hipDeviceSynchronize();
startCuda = clock();
cout<<"This is the "<<i<<" th loop\n";
//Copies the initial permutation to each result permutation
for (int i = 0; i < THREADS; i++) {
//cudaStat = cudaMemcpy(dPermutation[i].order, currPerm->order, CITY_N * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStat != hipSuccess) {
cout << "couldn't copy memory to global memory. Exit." << endl;
return;
}
cudaStat = hipMemcpy(&dPermutation[i].cost, &currPerm->cost, sizeof(int), hipMemcpyHostToDevice);
if (cudaStat != hipSuccess) {
cout << "couldn't copy memory to global memory. Exit." << endl;
return;
}
}
cout<<"Just before the kernel call\n";
//invoke cuda
solve<<<blocksPerGrid, threadsPerBlock>>>(dPermutation, t);
cudaStat = hipDeviceSynchronize();
if (cudaStat != hipSuccess) {
cout << "something went wrong during device execution. Exit." << endl;
return;
}
endCuda = clock();
cudaRuntime += (endCuda - startCuda) * 1000 / CLOCKS_PER_SEC;
for (int i = 0; i < THREADS; i++) {
cudaStat = hipMemcpy(hPermutation[i].order, dPermutation[i].order, CITY_N * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStat != hipSuccess) {
cout << "couldn't copy memory from global memory. Exit." << endl;
return;
}
cudaStat = hipMemcpy(&hPermutation[i].cost, &dPermutation[i].cost, sizeof(int), hipMemcpyDeviceToHost);
if (cudaStat != hipSuccess) {
cout << "couldn't copy memory from global memory. Exit." << endl;
return;
}
cudaStat = hipMemcpy(&hPermutation[i].nSucc, &dPermutation[i].nSucc, sizeof(int), hipMemcpyDeviceToHost);
if (cudaStat != hipSuccess) {
cout << "couldn't copy memory from global memory. Exit." << endl;
return;
}
}
/* Loops through all resulting permutations and store the one with minimal length but
* at least one swap.
* If all threads didn't swap, exit the program.
* Takes O(n) time.
*/
int minCost = 2147483647;
bool swap = false;
for (int j = 0; j < THREADS; ++j) {
if (minCost >= hPermutation[j].cost && hPermutation[j].nSucc != 0) {
currPerm = &(hPermutation[j]);
minCost = currPerm->cost;
swap = true;
if (minCost < allMinPerm->cost)
copy_permutation(allMinPerm, currPerm);
//memcpy(allMinPerm, currPerm, sizeof(struct permutation));
}
}
if (!swap) {
cout << "No swaps occured. Exit" << endl;
break;
}
if (oldCost == minCost) {
if (++repeatCost == 5) {
cout << "Cost did not change 5 times in a row. Exit" << endl;
break;
}
} else
repeatCost = 0;
cout << endl << "T = " << t << endl;
//cout << "repeat: " << repeatCost << ", old: " << oldCost << ", new: " << minCost << endl;
printInformation(currPerm, false);
//for (int j = 0; j < THREADS; ++j)
// printInformation(&(hPermutation[j]), false);
oldCost = minCost;
t *= COOLING;
}
endAll = clock();
runtimeAll = (endAll - startAll) / (1.0f * CLOCKS_PER_SEC) * 1000;
cout << endl << "Final Result:" << endl;
cout << "=============" << endl;
printInformation(allMinPerm);
runtime = runtimeAll;
resultCost = allMinPerm->cost;
printf("\nThe program needed an overall time of %.2lf ms.\n", runtimeAll);
printf("%.2lf ms were spent at the CUDA part.\n", cudaRuntime);
printf("So %.2lf ms were spent at the host.", runtimeAll - cudaRuntime);
hipFree(dPermutation);
hipFree(dCities);
hipFree(dLCGX);
free(allMinPerm);
free(LCGX);
free(hPermutation);
}
};
// Parses a TSPLIB-style input file: skips a 6-line header (reading the city
// count from the 4th line, formatted "DIMENSION: <n>"), then reads one
// "<id> <x> <y>" line per city. Sets the globals CITY_N and cities.
// Exits the process on open/parse failure instead of crashing on a NULL
// FILE* or NULL strtok() result as the original did.
void readFile(char* FILENAME)
{
    FILE *fp;
    char line[80];
    int i = 0;
    fp = fopen(FILENAME, "rt");
    if (fp == NULL) {
        fprintf(stderr, "Could not open input file '%s'.\n", FILENAME);
        exit(-1);
    }
    for (int h = 0; h < 6; h++) {      // header is exactly 6 lines
        if (fgets(line, sizeof(line), fp) == NULL) {
            fprintf(stderr, "Unexpected end of header in '%s'.\n", FILENAME);
            fclose(fp);
            exit(-1);
        }
        if (h == 3) {                  // 4th header line carries the dimension
            char* pch = strtok(line, ":");
            pch = strtok(NULL, "\t\n");
            if (pch == NULL) {
                fprintf(stderr, "Malformed DIMENSION line in '%s'.\n", FILENAME);
                fclose(fp);
                exit(-1);
            }
            CITY_N = atoi(pch);
        }
    }
    cout<<"Number of cities is "<<CITY_N<<endl;
    cities = (struct city *) malloc (CITY_N * sizeof(struct city));
    if (cities == NULL) {
        fprintf(stderr, "Out of memory allocating %d cities.\n", CITY_N);
        fclose(fp);
        exit(-1);
    }
    while (fgets(line, 80, fp) != NULL && i < CITY_N) {
        // "%*d" skips the leading city-id column
        sscanf(line, "%*d %lf %lf", &(cities[i].x), &(cities[i].y));
        ++i;
    }
    fclose(fp);                        // original leaked the file handle
}
// Dumps every city's coordinates to stdout, one line per city.
void printCities(struct city *cities)
{
    cout << "Cities: " << endl;
    int idx = 0;
    while (idx < CITY_N) {
        cout << idx << ". x: " << cities[idx].x << " y: " << cities[idx].y << endl;
        ++idx;
    }
}
int main(int argc, char* argv[])
{
if (argc < 2) {
printf("Usage: ./tsp inputFiles\n");
exit(-1);
}
readFile(argv[1]);
int *order = (int *) malloc(CITY_N * sizeof(int));
float avgResult = 0.0f;
double avgRuntime = 0.0f;
for (int runs = 0; runs < NUMBER_RUNS; ++runs) {
for (int i = 0; i < CITY_N; ++i)
order[i] = i;
//printCities(cities);
Anneal *a = new Anneal();
a->order(cities, order);
avgResult += a->resultCost / (NUMBER_RUNS * 1.0f);
avgRuntime += a->runtime / (NUMBER_RUNS * 1.0f);
}
cout << endl << endl;
cout << "Average Costs: " << avgResult << endl;
cout << "Average Runtime: " << avgRuntime << endl;
return 0;
} |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
cudaDeviceProp deviceProps;
// Enumerates all CUDA devices, prints their key properties, and selects
// device 0 as the active device. Returns false if the device query fails.
// BUG FIX: the original called cudaGetDevice(), which returns the ordinal
// of the *currently selected* device (not a device count), and then passed
// `device` instead of `i` to cudaGetDeviceProperties, so on multi-GPU
// systems it printed the same device's properties on every iteration.
bool cuInit()
{
    bool res = true;
    int deviceCount = 0;
    cudaError_t err = cudaGetDeviceCount( &deviceCount );
    if( err != cudaSuccess )
    {
        printf("%s\n", cudaGetErrorString(err));
        res = false;
        return res;
    }
    printf(" Device Count: %d\n", deviceCount );
    for(int i = 0; i < deviceCount; i++)
    {
        cudaGetDeviceProperties(&deviceProps, i);   // query device i, not always the same one
        printf(" Device Number: %d\n", i);
        printf(" Device name: %s\n", deviceProps.name);
        printf(" Memory Clock Rate (KHz): %d\n", deviceProps.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", deviceProps.memoryBusWidth );
        printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
            2.0*deviceProps.memoryClockRate*(deviceProps.memoryBusWidth/8)/1.0e6);
        printf(" Max Threads per Block: %d\n", deviceProps.maxThreadsPerBlock );
    }
    // Select CUDA device 0 for all subsequent work.
    cudaSetDevice(0);
    return res;
}
//******************************************************************************************************
// !!! Example — DO NOT TOUCH !!!!!
//__global__ void findMean(unsigned int dataForBlock, float *inputData, float *results)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// float result = 0;
// for (int i = 0; i < dataForBlock; i++)
// {
// result += inputData[index * dataForBlock + i];
// }
// result /= dataForBlock;
// results[index] = result;
//}
//void processWithGPU(float *blocks, float *results, unsigned int blockSize, unsigned int blocksCount)
//{
// unsigned int realDataCount = blockSize * blocksCount;
// cudaSetDevice(0);
// float *deviceInputData,
// *deviceResults;
// cudaMalloc( (void**)&deviceInputData, realDataCount * sizeof(float) );
// cudaMalloc( (void**)&deviceResults, realDataCount * sizeof(float) );
// cudaMemcpy( deviceInputData, blocks, realDataCount * sizeof(float), cudaMemcpyHostToDevice );
// findMean<<<1, blocksCount>>>( blockSize, deviceInputData, deviceResults );
// cudaMemcpy( (void*)results, deviceResults, blocksCount * sizeof(float), cudaMemcpyDeviceToHost );
// cudaFree(deviceInputData);
// cudaFree(deviceResults );
//}
//********************************************************************************************************
// Build Image
// Downsamples `source` into `dest` by 2x2 box-filter averaging.
// Launch with one block per destination pixel (grid = destWidth x destHeight,
// 1 thread per block); the source row pitch is gridDim.x * 2 floats.
// BUG FIX: the original loaded each of the four source texels twice — once
// into dead temporaries (t_[0..3], t_sume, t_result, with a double 0.25
// literal) and once in the final store expression. The dead code is removed
// and each texel is loaded exactly once; the stored result is unchanged.
__global__ void cuBuildImageKernel( float* source, float* dest )
{
    int x = blockIdx.x;
    int y = blockIdx.y;
    int offset = x + y * gridDim.x;
    // Top-left corner of this pixel's 2x2 source patch.
    float* s = source + ( y * (gridDim.x * 2 ) * 2 + x * 2);
    // Same left-to-right summation order as the original expression.
    float sum = s[0] + s[1] + s[gridDim.x * 2] + s[gridDim.x * 2 + 1];
    dest[offset] = sum * 0.25f;
}
// Host wrapper: uploads `source`, launches the 2x2 box-downsample kernel with
// one single-thread block per destination pixel, and copies the result back
// into `dest` (const-qualified in the signature, but in fact the output
// buffer — hence the cast on the device-to-host copy).
void cuBuildImage( const float* source, int sourceWidth, int sourceHeight,
                   const float* dest, int destWidth, int destHeight )
{
    const size_t sourceBytes = (size_t)(sourceWidth * sourceHeight) * sizeof(float);
    const size_t destBytes   = (size_t)(destWidth * destHeight) * sizeof(float);
    // Device buffers for the input and output images.
    float *sourceBuff = NULL;
    float *destBuff   = NULL;
    cudaMalloc( (void**)&sourceBuff, sourceBytes );
    cudaMalloc( (void**)&destBuff, destBytes );
    // Upload the full-resolution image.
    cudaMemcpy( sourceBuff, source, sourceBytes, cudaMemcpyHostToDevice );
    // One block per destination pixel.
    dim3 grid( destWidth, destHeight );
    cuBuildImageKernel<<<grid, 1>>>( sourceBuff, destBuff );
    // Download the downsampled result.
    cudaMemcpy( (void*)dest, destBuff, destBytes, cudaMemcpyDeviceToHost );
    cudaFree( sourceBuff );
    cudaFree( destBuff );
}
//********************************************************************************************************
// Build Gradient
//__global__ void cuBuildGradientsKernel()
//{
//}
//void cuBuildGradients(const float* )
//{
// const float* img_pt = data.image[level] + width;
// const float* img_pt_max = data.image[level] + width * (height-1);
// float* gradxyii_pt = data.gradients[level] + width;
// // in each iteration i need -1,0,p1,mw,pw
// float val_m1 = *(img_pt-1);
// float val_00 = * img_pt;
// float val_p1;
// for(; img_pt < img_pt_max; img_pt++, gradxyii_pt++)
// {
// val_p1 = *(img_pt+1);
// *( (float*)gradxyii_pt +0) = 0.5f*(val_p1 - val_m1);
// *(((float*)gradxyii_pt)+1) = 0.5f*(*(img_pt+width) - *(img_pt-width));
// *(((float*)gradxyii_pt)+2) = val_00;
// val_m1 = val_00;
// val_00 = val_p1;
// }
//}
//********************************************************************************************************
// Build MaxGradient
//__global__ void cuBuildMaxGradientsKernel()
//{
//}
//void buildMaxGradients(int level)
//{
// float* maxGradTemp = FrameMemory::getInstance().getFloatBuffer(width * height);
// // 1. write abs gradients in real data.
// Eigen::Vector4f* gradxyii_pt = data.gradients[level] + width;
// float* maxgrad_pt = data.maxGradients[level] + width;
// float* maxgrad_pt_max = data.maxGradients[level] + width*(height-1);
// for(; maxgrad_pt < maxgrad_pt_max; maxgrad_pt++, gradxyii_pt++ )
// {
// float dx = *( (float*)gradxyii_pt);
// float dy = *(1+(float*)gradxyii_pt);
// *maxgrad_pt = sqrtf(dx*dx + dy*dy);
// }
// // 2. smear up/down direction into temp buffer
// maxgrad_pt = data.maxGradients[level] + width+1;
// maxgrad_pt_max = data.maxGradients[level] + width*(height-1)-1;
// float* maxgrad_t_pt = maxGradTemp + width+1;
// for(;maxgrad_pt<maxgrad_pt_max; maxgrad_pt++, maxgrad_t_pt++ )
// {
// float g1 = maxgrad_pt[-width];
// float g2 = maxgrad_pt[0];
// if(g1 < g2)
// g1 = g2;
// float g3 = maxgrad_pt[width];
// if(g1 < g3)
// *maxgrad_t_pt = g3;
// else
// *maxgrad_t_pt = g1;
// }
// float numMappablePixels = 0;
// // 2. smear left/right direction into real data
// maxgrad_pt = data.maxGradients[level] + width+1;
// maxgrad_pt_max = data.maxGradients[level] + width*(height-1)-1;
// maxgrad_t_pt = maxGradTemp + width+1;
// for(;maxgrad_pt<maxgrad_pt_max; maxgrad_pt++, maxgrad_t_pt++ )
// {
// float g1 = maxgrad_t_pt[-1];
// float g2 = maxgrad_t_pt[0];
// if(g1 < g2)
// g1 = g2;
// float g3 = maxgrad_t_pt[1];
// if(g1 < g3)
// {
// *maxgrad_pt = g3;
// if(g3 >= MIN_ABS_GRAD_CREATE)
// numMappablePixels++;
// }
// else
// {
// *maxgrad_pt = g1;
// if(g1 >= MIN_ABS_GRAD_CREATE)
// numMappablePixels++;
// }
// }
// if(level==0)
// this->numMappablePixels = numMappablePixels;
// FrameMemory::getInstance().returnBuffer(maxGradTemp);
//}
//********************************************************************************************************
//__global__ void cuBuildIDepthAndIDepthVarKernel()
//{
//}
//// Build IDepth And IDepth Var
//void buildIDepthAndIDepthVar( int level )
//{
// int sw = data.width[level - 1];
// const float* idepthSource = data.idepth [level - 1];
// const float* idepthVarSource = data.idepthVar[level - 1];
// float* idepthDest = data.idepth [level];
// float* idepthVarDest = data.idepthVar[level];
// for( int y = 0; y < height; y++ )
// {
// for( int x = 0; x < width; x++ )
// {
// int idx = 2 * ( x + y * sw );
// int idxDest = ( x + y * width );
// float idepthSumsSum = 0;
// float ivarSumsSum = 0;
// int num = 0;
// // build sums
// float ivar;
// float var = idepthVarSource[idx];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[idx];
// num++;
// }
// var = idepthVarSource[ idx + 1 ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + 1 ];
// num++;
// }
// var = idepthVarSource[ idx + sw ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + sw ];
// num++;
// }
// var = idepthVarSource[ idx + sw + 1 ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + sw + 1 ];
// num++;
// }
// if(num > 0)
// {
// float depth = ivarSumsSum / idepthSumsSum;
// idepthDest [ idxDest ] = 1.0f / depth;
// idepthVarDest[ idxDest ] = num / ivarSumsSum;
// }
// else
// {
// idepthDest [ idxDest ] = -1;
// idepthVarDest[ idxDest ] = -1;
// }
// }
// }
//} | code for sm_80
Function : _Z18cuBuildImageKernelPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ MOV R5, c[0x0][0xc] ; /* 0x0000030000057a02 */
/* 0x000fe20000000f00 */
/*0030*/ HFMA2.MMA R15, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0f7435 */
/* 0x000fe200000001ff */
/*0040*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0050*/ S2R R13, SR_CTAID.Y ; /* 0x00000000000d7919 */
/* 0x000e220000002600 */
/*0060*/ SHF.L.U32 R5, R5, 0x1, RZ ; /* 0x0000000105057819 */
/* 0x000fca00000006ff */
/*0070*/ IMAD R0, R5, R13, R6 ; /* 0x0000000d05007224 */
/* 0x001fca00078e0206 */
/*0080*/ SHF.L.U32 R0, R0, 0x1, RZ ; /* 0x0000000100007819 */
/* 0x000fca00000006ff */
/*0090*/ IMAD.WIDE.U32 R2, R0, R15, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e000f */
/*00a0*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea2000c1e1900 */
/*00b0*/ IMAD.WIDE.U32 R4, R5, 0x4, R2 ; /* 0x0000000405047825 */
/* 0x000fc600078e0002 */
/*00c0*/ LDG.E R7, [R2.64+0x4] ; /* 0x0000040402077981 */
/* 0x000ea8000c1e1900 */
/*00d0*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */
/* 0x000ee8000c1e1900 */
/*00e0*/ LDG.E R11, [R4.64+0x4] ; /* 0x00000404040b7981 */
/* 0x000f22000c1e1900 */
/*00f0*/ IMAD R6, R13, c[0x0][0xc], R6 ; /* 0x000003000d067a24 */
/* 0x000fe400078e0206 */
/*0100*/ FADD R0, R0, R7 ; /* 0x0000000700007221 */
/* 0x004fc40000000000 */
/*0110*/ IMAD.WIDE R6, R6, R15, c[0x0][0x168] ; /* 0x00005a0006067625 */
/* 0x000fc800078e020f */
/*0120*/ FADD R0, R0, R9 ; /* 0x0000000900007221 */
/* 0x008fc80000000000 */
/*0130*/ FADD R0, R0, R11 ; /* 0x0000000b00007221 */
/* 0x010fc80000000000 */
/*0140*/ FMUL R9, R0, 0.25 ; /* 0x3e80000000097820 */
/* 0x000fca0000400000 */
/*0150*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0160*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0170*/ BRA 0x170; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
cudaDeviceProp deviceProps;
// Enumerates all CUDA devices, prints their key properties, and selects
// device 0 as the active device. Returns false if the device query fails.
// BUG FIX: the original called cudaGetDevice(), which returns the ordinal
// of the *currently selected* device (not a device count), and then passed
// `device` instead of `i` to cudaGetDeviceProperties, so on multi-GPU
// systems it printed the same device's properties on every iteration.
bool cuInit()
{
    bool res = true;
    int deviceCount = 0;
    cudaError_t err = cudaGetDeviceCount( &deviceCount );
    if( err != cudaSuccess )
    {
        printf("%s\n", cudaGetErrorString(err));
        res = false;
        return res;
    }
    printf(" Device Count: %d\n", deviceCount );
    for(int i = 0; i < deviceCount; i++)
    {
        cudaGetDeviceProperties(&deviceProps, i);   // query device i, not always the same one
        printf(" Device Number: %d\n", i);
        printf(" Device name: %s\n", deviceProps.name);
        printf(" Memory Clock Rate (KHz): %d\n", deviceProps.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", deviceProps.memoryBusWidth );
        printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
            2.0*deviceProps.memoryClockRate*(deviceProps.memoryBusWidth/8)/1.0e6);
        printf(" Max Threads per Block: %d\n", deviceProps.maxThreadsPerBlock );
    }
    // Select CUDA device 0 for all subsequent work.
    cudaSetDevice(0);
    return res;
}
//******************************************************************************************************
// !!! Example — DO NOT TOUCH !!!!!
//__global__ void findMean(unsigned int dataForBlock, float *inputData, float *results)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// float result = 0;
// for (int i = 0; i < dataForBlock; i++)
// {
// result += inputData[index * dataForBlock + i];
// }
// result /= dataForBlock;
// results[index] = result;
//}
//void processWithGPU(float *blocks, float *results, unsigned int blockSize, unsigned int blocksCount)
//{
// unsigned int realDataCount = blockSize * blocksCount;
// cudaSetDevice(0);
// float *deviceInputData,
// *deviceResults;
// cudaMalloc( (void**)&deviceInputData, realDataCount * sizeof(float) );
// cudaMalloc( (void**)&deviceResults, realDataCount * sizeof(float) );
// cudaMemcpy( deviceInputData, blocks, realDataCount * sizeof(float), cudaMemcpyHostToDevice );
// findMean<<<1, blocksCount>>>( blockSize, deviceInputData, deviceResults );
// cudaMemcpy( (void*)results, deviceResults, blocksCount * sizeof(float), cudaMemcpyDeviceToHost );
// cudaFree(deviceInputData);
// cudaFree(deviceResults );
//}
//********************************************************************************************************
// Build Image
// Downsamples `source` into `dest` by 2x2 box-filter averaging.
// Launch with one block per destination pixel (grid = destWidth x destHeight,
// 1 thread per block); the source row pitch is gridDim.x * 2 floats.
// BUG FIX: the original loaded each of the four source texels twice — once
// into dead temporaries (t_[0..3], t_sume, t_result, with a double 0.25
// literal) and once in the final store expression. The dead code is removed
// and each texel is loaded exactly once; the stored result is unchanged.
__global__ void cuBuildImageKernel( float* source, float* dest )
{
    int x = blockIdx.x;
    int y = blockIdx.y;
    int offset = x + y * gridDim.x;
    // Top-left corner of this pixel's 2x2 source patch.
    float* s = source + ( y * (gridDim.x * 2 ) * 2 + x * 2);
    // Same left-to-right summation order as the original expression.
    float sum = s[0] + s[1] + s[gridDim.x * 2] + s[gridDim.x * 2 + 1];
    dest[offset] = sum * 0.25f;
}
// Host wrapper: uploads `source`, launches the 2x2 box-downsample kernel with
// one single-thread block per destination pixel, and copies the result back
// into `dest` (const-qualified in the signature, but in fact the output
// buffer — hence the cast on the device-to-host copy).
void cuBuildImage( const float* source, int sourceWidth, int sourceHeight,
                   const float* dest, int destWidth, int destHeight )
{
    const size_t sourceBytes = (size_t)(sourceWidth * sourceHeight) * sizeof(float);
    const size_t destBytes   = (size_t)(destWidth * destHeight) * sizeof(float);
    // Device buffers for the input and output images.
    float *sourceBuff = NULL;
    float *destBuff   = NULL;
    cudaMalloc( (void**)&sourceBuff, sourceBytes );
    cudaMalloc( (void**)&destBuff, destBytes );
    // Upload the full-resolution image.
    cudaMemcpy( sourceBuff, source, sourceBytes, cudaMemcpyHostToDevice );
    // One block per destination pixel.
    dim3 grid( destWidth, destHeight );
    cuBuildImageKernel<<<grid, 1>>>( sourceBuff, destBuff );
    // Download the downsampled result.
    cudaMemcpy( (void*)dest, destBuff, destBytes, cudaMemcpyDeviceToHost );
    cudaFree( sourceBuff );
    cudaFree( destBuff );
}
//********************************************************************************************************
// Build Gradient
//__global__ void cuBuildGradientsKernel()
//{
//}
//void cuBuildGradients(const float* )
//{
// const float* img_pt = data.image[level] + width;
// const float* img_pt_max = data.image[level] + width * (height-1);
// float* gradxyii_pt = data.gradients[level] + width;
// // in each iteration i need -1,0,p1,mw,pw
// float val_m1 = *(img_pt-1);
// float val_00 = * img_pt;
// float val_p1;
// for(; img_pt < img_pt_max; img_pt++, gradxyii_pt++)
// {
// val_p1 = *(img_pt+1);
// *( (float*)gradxyii_pt +0) = 0.5f*(val_p1 - val_m1);
// *(((float*)gradxyii_pt)+1) = 0.5f*(*(img_pt+width) - *(img_pt-width));
// *(((float*)gradxyii_pt)+2) = val_00;
// val_m1 = val_00;
// val_00 = val_p1;
// }
//}
//********************************************************************************************************
// Build MaxGradient
//__global__ void cuBuildMaxGradientsKernel()
//{
//}
//void buildMaxGradients(int level)
//{
// float* maxGradTemp = FrameMemory::getInstance().getFloatBuffer(width * height);
// // 1. write abs gradients in real data.
// Eigen::Vector4f* gradxyii_pt = data.gradients[level] + width;
// float* maxgrad_pt = data.maxGradients[level] + width;
// float* maxgrad_pt_max = data.maxGradients[level] + width*(height-1);
// for(; maxgrad_pt < maxgrad_pt_max; maxgrad_pt++, gradxyii_pt++ )
// {
// float dx = *( (float*)gradxyii_pt);
// float dy = *(1+(float*)gradxyii_pt);
// *maxgrad_pt = sqrtf(dx*dx + dy*dy);
// }
// // 2. smear up/down direction into temp buffer
// maxgrad_pt = data.maxGradients[level] + width+1;
// maxgrad_pt_max = data.maxGradients[level] + width*(height-1)-1;
// float* maxgrad_t_pt = maxGradTemp + width+1;
// for(;maxgrad_pt<maxgrad_pt_max; maxgrad_pt++, maxgrad_t_pt++ )
// {
// float g1 = maxgrad_pt[-width];
// float g2 = maxgrad_pt[0];
// if(g1 < g2)
// g1 = g2;
// float g3 = maxgrad_pt[width];
// if(g1 < g3)
// *maxgrad_t_pt = g3;
// else
// *maxgrad_t_pt = g1;
// }
// float numMappablePixels = 0;
// // 2. smear left/right direction into real data
// maxgrad_pt = data.maxGradients[level] + width+1;
// maxgrad_pt_max = data.maxGradients[level] + width*(height-1)-1;
// maxgrad_t_pt = maxGradTemp + width+1;
// for(;maxgrad_pt<maxgrad_pt_max; maxgrad_pt++, maxgrad_t_pt++ )
// {
// float g1 = maxgrad_t_pt[-1];
// float g2 = maxgrad_t_pt[0];
// if(g1 < g2)
// g1 = g2;
// float g3 = maxgrad_t_pt[1];
// if(g1 < g3)
// {
// *maxgrad_pt = g3;
// if(g3 >= MIN_ABS_GRAD_CREATE)
// numMappablePixels++;
// }
// else
// {
// *maxgrad_pt = g1;
// if(g1 >= MIN_ABS_GRAD_CREATE)
// numMappablePixels++;
// }
// }
// if(level==0)
// this->numMappablePixels = numMappablePixels;
// FrameMemory::getInstance().returnBuffer(maxGradTemp);
//}
//********************************************************************************************************
//__global__ void cuBuildIDepthAndIDepthVarKernel()
//{
//}
//// Build IDepth And IDepth Var
//void buildIDepthAndIDepthVar( int level )
//{
// int sw = data.width[level - 1];
// const float* idepthSource = data.idepth [level - 1];
// const float* idepthVarSource = data.idepthVar[level - 1];
// float* idepthDest = data.idepth [level];
// float* idepthVarDest = data.idepthVar[level];
// for( int y = 0; y < height; y++ )
// {
// for( int x = 0; x < width; x++ )
// {
// int idx = 2 * ( x + y * sw );
// int idxDest = ( x + y * width );
// float idepthSumsSum = 0;
// float ivarSumsSum = 0;
// int num = 0;
// // build sums
// float ivar;
// float var = idepthVarSource[idx];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[idx];
// num++;
// }
// var = idepthVarSource[ idx + 1 ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + 1 ];
// num++;
// }
// var = idepthVarSource[ idx + sw ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + sw ];
// num++;
// }
// var = idepthVarSource[ idx + sw + 1 ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + sw + 1 ];
// num++;
// }
// if(num > 0)
// {
// float depth = ivarSumsSum / idepthSumsSum;
// idepthDest [ idxDest ] = 1.0f / depth;
// idepthVarDest[ idxDest ] = num / ivarSumsSum;
// }
// else
// {
// idepthDest [ idxDest ] = -1;
// idepthVarDest[ idxDest ] = -1;
// }
// }
// }
//} | .file "tmpxft_000bf00e_00000000-6_gpu_processor.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%s\n"
.LC1:
.string " Device Count: %d\n"
.LC2:
.string " Device Number: %d\n"
.LC3:
.string " Device name: %s\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string " Memory Clock Rate (KHz): %d\n"
.align 8
.LC5:
.string " Memory Bus Width (bits): %d\n"
.align 8
.LC7:
.string " Peak Memory Bandwidth (GB/s): %f\n\n"
.section .rodata.str1.1
.LC8:
.string " Max Threads per Block: %d\n"
.text
.globl _Z6cuInitv
.type _Z6cuInitv, @function
_Z6cuInitv:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $24, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
movl $0, 4(%rsp)
leaq 4(%rsp), %rdi
call cudaGetDevice@PLT
testl %eax, %eax
jne .L11
movl 4(%rsp), %eax
leal 1(%rax), %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 4(%rsp), %esi
testl %esi, %esi
js .L6
movl $0, %ebp
leaq deviceProps(%rip), %rbx
leaq .LC2(%rip), %r13
leaq .LC3(%rip), %r12
.L7:
movq %rbx, %rdi
call cudaGetDeviceProperties_v2@PLT
movl %ebp, %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rdx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 608(%rbx), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 612(%rbx), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtsi2sdl 608(%rbx), %xmm0
addsd %xmm0, %xmm0
movl 612(%rbx), %edx
leal 7(%rdx), %eax
testl %edx, %edx
cmovns %edx, %eax
sarl $3, %eax
pxor %xmm1, %xmm1
cvtsi2sdl %eax, %xmm1
mulsd %xmm1, %xmm0
divsd .LC6(%rip), %xmm0
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl 320(%rbx), %edx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebp
movl 4(%rsp), %esi
cmpl %ebp, %esi
jge .L7
.L6:
movl $0, %edi
call cudaSetDevice@PLT
movl $1, %eax
.L3:
movq 8(%rsp), %rdx
subq %fs:40, %rdx
jne .L12
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
jmp .L3
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z6cuInitv, .-_Z6cuInitv
.globl _Z40__device_stub__Z18cuBuildImageKernelPfS_PfS_
.type _Z40__device_stub__Z18cuBuildImageKernelPfS_PfS_, @function
_Z40__device_stub__Z18cuBuildImageKernelPfS_PfS_:
.LFB2083:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z18cuBuildImageKernelPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z40__device_stub__Z18cuBuildImageKernelPfS_PfS_, .-_Z40__device_stub__Z18cuBuildImageKernelPfS_PfS_
.globl _Z18cuBuildImageKernelPfS_
.type _Z18cuBuildImageKernelPfS_, @function
_Z18cuBuildImageKernelPfS_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z18cuBuildImageKernelPfS_PfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z18cuBuildImageKernelPfS_, .-_Z18cuBuildImageKernelPfS_
.globl _Z12cuBuildImagePKfiiS0_ii
.type _Z12cuBuildImagePKfiiS0_ii, @function
_Z12cuBuildImagePKfiiS0_ii:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r15
movq %rcx, %r14
movl %r8d, %r13d
movl %r9d, %r12d
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl %r8d, %ebp
imull %r9d, %ebp
imull %edx, %esi
movslq %esi, %rbx
salq $2, %rbx
movq %rsp, %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movslq %ebp, %rbp
salq $2, %rbp
leaq 8(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r15, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl %r13d, 16(%rsp)
movl %r12d, 20(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L22:
movl $2, %ecx
movq %rbp, %rdx
movq 8(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z40__device_stub__Z18cuBuildImageKernelPfS_PfS_
jmp .L22
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z12cuBuildImagePKfiiS0_ii, .-_Z12cuBuildImagePKfiiS0_ii
.section .rodata.str1.1
.LC9:
.string "_Z18cuBuildImageKernelPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _Z18cuBuildImageKernelPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl deviceProps
.bss
.align 32
.type deviceProps, @object
.size deviceProps, 1032
deviceProps:
.zero 1032
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC6:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
cudaDeviceProp deviceProps;
// Enumerates all CUDA devices, prints their key properties, and selects
// device 0 as the active device. Returns false if the device query fails.
// BUG FIX: the original called cudaGetDevice(), which returns the ordinal
// of the *currently selected* device (not a device count), and then passed
// `device` instead of `i` to cudaGetDeviceProperties, so on multi-GPU
// systems it printed the same device's properties on every iteration.
bool cuInit()
{
    bool res = true;
    int deviceCount = 0;
    cudaError_t err = cudaGetDeviceCount( &deviceCount );
    if( err != cudaSuccess )
    {
        printf("%s\n", cudaGetErrorString(err));
        res = false;
        return res;
    }
    printf(" Device Count: %d\n", deviceCount );
    for(int i = 0; i < deviceCount; i++)
    {
        cudaGetDeviceProperties(&deviceProps, i);   // query device i, not always the same one
        printf(" Device Number: %d\n", i);
        printf(" Device name: %s\n", deviceProps.name);
        printf(" Memory Clock Rate (KHz): %d\n", deviceProps.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", deviceProps.memoryBusWidth );
        printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
            2.0*deviceProps.memoryClockRate*(deviceProps.memoryBusWidth/8)/1.0e6);
        printf(" Max Threads per Block: %d\n", deviceProps.maxThreadsPerBlock );
    }
    // Select CUDA device 0 for all subsequent work.
    cudaSetDevice(0);
    return res;
}
//******************************************************************************************************
// !!! Exemple DO NOT TOCH !!!!!
//__global__ void findMean(unsigned int dataForBlock, float *inputData, float *results)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// float result = 0;
// for (int i = 0; i < dataForBlock; i++)
// {
// result += inputData[index * dataForBlock + i];
// }
// result /= dataForBlock;
// results[index] = result;
//}
//void processWithGPU(float *blocks, float *results, unsigned int blockSize, unsigned int blocksCount)
//{
// unsigned int realDataCount = blockSize * blocksCount;
// cudaSetDevice(0);
// float *deviceInputData,
// *deviceResults;
// cudaMalloc( (void**)&deviceInputData, realDataCount * sizeof(float) );
// cudaMalloc( (void**)&deviceResults, realDataCount * sizeof(float) );
// cudaMemcpy( deviceInputData, blocks, realDataCount * sizeof(float), cudaMemcpyHostToDevice );
// findMean<<<1, blocksCount>>>( blockSize, deviceInputData, deviceResults );
// cudaMemcpy( (void*)results, deviceResults, blocksCount * sizeof(float), cudaMemcpyDeviceToHost );
// cudaFree(deviceInputData);
// cudaFree(deviceResults );
//}
//********************************************************************************************************
// Build Image
__global__ void cuBuildImageKernel( float* source, float* dest )
{
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
float* s = source + ( y * (gridDim.x * 2 ) * 2 + x * 2);
float t_[4], t_sume, t_result;
// Save temp values
t_[0] = s[0];
t_[1] = s[1];
t_[2] = s[ gridDim.x * 2];
t_[3] = s[1+gridDim.x * 2];
// Calculate sum
t_sume = t_[0] + t_[1] + t_[2] + t_[3];
// Calculate result
t_result = t_sume * 0.25;
dest[offset] = ( s[ 0 ] +
s[ 1 ] +
s[ gridDim.x * 2 ] +
s[ gridDim.x * 2 + 1] ) * 0.25f;
}
void cuBuildImage( const float* source, int sourceWidth, int sourceHeight,
const float* dest, int destWidth, int destHeight )
{
int sourceBuffLength = sourceWidth * sourceHeight;
int destBuffLength = destWidth * destHeight;
// Reserving memory on GPU
float *sourceBuff,
*destBuff;
cudaMalloc( (void**)&sourceBuff, sourceBuffLength * sizeof(float) );
cudaMalloc( (void**)&destBuff, destBuffLength * sizeof(float) );
// Copy input buffer
cudaMemcpy( sourceBuff, source, sourceBuffLength * sizeof(float), cudaMemcpyHostToDevice );
dim3 grid( destWidth, destHeight );
cuBuildImageKernel<<<grid, 1>>>( sourceBuff, destBuff );
cudaMemcpy( (void*)dest, destBuff, destBuffLength * sizeof(float), cudaMemcpyDeviceToHost );
cudaFree( sourceBuff );
cudaFree( destBuff );
}
//********************************************************************************************************
// Build Gradient
//__global__ void cuBuildGradientsKernel()
//{
//}
//void cuBuildGradients(const float* )
//{
// const float* img_pt = data.image[level] + width;
// const float* img_pt_max = data.image[level] + width * (height-1);
// float* gradxyii_pt = data.gradients[level] + width;
// // in each iteration i need -1,0,p1,mw,pw
// float val_m1 = *(img_pt-1);
// float val_00 = * img_pt;
// float val_p1;
// for(; img_pt < img_pt_max; img_pt++, gradxyii_pt++)
// {
// val_p1 = *(img_pt+1);
// *( (float*)gradxyii_pt +0) = 0.5f*(val_p1 - val_m1);
// *(((float*)gradxyii_pt)+1) = 0.5f*(*(img_pt+width) - *(img_pt-width));
// *(((float*)gradxyii_pt)+2) = val_00;
// val_m1 = val_00;
// val_00 = val_p1;
// }
//}
//********************************************************************************************************
// Build MaxGradient
//__global__ void cuBuildMaxGradientsKernel()
//{
//}
//void buildMaxGradients(int level)
//{
// float* maxGradTemp = FrameMemory::getInstance().getFloatBuffer(width * height);
// // 1. write abs gradients in real data.
// Eigen::Vector4f* gradxyii_pt = data.gradients[level] + width;
// float* maxgrad_pt = data.maxGradients[level] + width;
// float* maxgrad_pt_max = data.maxGradients[level] + width*(height-1);
// for(; maxgrad_pt < maxgrad_pt_max; maxgrad_pt++, gradxyii_pt++ )
// {
// float dx = *( (float*)gradxyii_pt);
// float dy = *(1+(float*)gradxyii_pt);
// *maxgrad_pt = sqrtf(dx*dx + dy*dy);
// }
// // 2. smear up/down direction into temp buffer
// maxgrad_pt = data.maxGradients[level] + width+1;
// maxgrad_pt_max = data.maxGradients[level] + width*(height-1)-1;
// float* maxgrad_t_pt = maxGradTemp + width+1;
// for(;maxgrad_pt<maxgrad_pt_max; maxgrad_pt++, maxgrad_t_pt++ )
// {
// float g1 = maxgrad_pt[-width];
// float g2 = maxgrad_pt[0];
// if(g1 < g2)
// g1 = g2;
// float g3 = maxgrad_pt[width];
// if(g1 < g3)
// *maxgrad_t_pt = g3;
// else
// *maxgrad_t_pt = g1;
// }
// float numMappablePixels = 0;
// // 2. smear left/right direction into real data
// maxgrad_pt = data.maxGradients[level] + width+1;
// maxgrad_pt_max = data.maxGradients[level] + width*(height-1)-1;
// maxgrad_t_pt = maxGradTemp + width+1;
// for(;maxgrad_pt<maxgrad_pt_max; maxgrad_pt++, maxgrad_t_pt++ )
// {
// float g1 = maxgrad_t_pt[-1];
// float g2 = maxgrad_t_pt[0];
// if(g1 < g2)
// g1 = g2;
// float g3 = maxgrad_t_pt[1];
// if(g1 < g3)
// {
// *maxgrad_pt = g3;
// if(g3 >= MIN_ABS_GRAD_CREATE)
// numMappablePixels++;
// }
// else
// {
// *maxgrad_pt = g1;
// if(g1 >= MIN_ABS_GRAD_CREATE)
// numMappablePixels++;
// }
// }
// if(level==0)
// this->numMappablePixels = numMappablePixels;
// FrameMemory::getInstance().returnBuffer(maxGradTemp);
//}
//********************************************************************************************************
//__global__ void cuBuildIDepthAndIDepthVarKernel()
//{
//}
//// Build IDepth And IDepth Var
//void buildIDepthAndIDepthVar( int level )
//{
// int sw = data.width[level - 1];
// const float* idepthSource = data.idepth [level - 1];
// const float* idepthVarSource = data.idepthVar[level - 1];
// float* idepthDest = data.idepth [level];
// float* idepthVarDest = data.idepthVar[level];
// for( int y = 0; y < height; y++ )
// {
// for( int x = 0; x < width; x++ )
// {
// int idx = 2 * ( x + y * sw );
// int idxDest = ( x + y * width );
// float idepthSumsSum = 0;
// float ivarSumsSum = 0;
// int num = 0;
// // build sums
// float ivar;
// float var = idepthVarSource[idx];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[idx];
// num++;
// }
// var = idepthVarSource[ idx + 1 ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + 1 ];
// num++;
// }
// var = idepthVarSource[ idx + sw ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + sw ];
// num++;
// }
// var = idepthVarSource[ idx + sw + 1 ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + sw + 1 ];
// num++;
// }
// if(num > 0)
// {
// float depth = ivarSumsSum / idepthSumsSum;
// idepthDest [ idxDest ] = 1.0f / depth;
// idepthVarDest[ idxDest ] = num / ivarSumsSum;
// }
// else
// {
// idepthDest [ idxDest ] = -1;
// idepthVarDest[ idxDest ] = -1;
// }
// }
// }
//} | #include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
hipDeviceProp_t deviceProps;
bool hipInit()
{
bool res = true;
int device = 0;
hipError_t err = hipGetDevice( &device );
if( err != hipSuccess )
{
printf("%s\n", hipGetErrorString(err));
res = false;
return res;
}
printf(" Device Count: %d\n", device+1 );
for(int i = 0; i <= device; i++)
{
hipGetDeviceProperties(&deviceProps, device);
printf(" Device Number: %d\n", i);
printf(" Device name: %s\n", deviceProps.name);
printf(" Memory Clock Rate (KHz): %d\n", deviceProps.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", deviceProps.memoryBusWidth );
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*deviceProps.memoryClockRate*(deviceProps.memoryBusWidth/8)/1.0e6);
printf(" Max Threads per Block: %d\n", deviceProps.maxThreadsPerBlock );
}
// Select CUDA device
hipSetDevice(0);
return res;
}
//******************************************************************************************************
// !!! Exemple DO NOT TOCH !!!!!
//__global__ void findMean(unsigned int dataForBlock, float *inputData, float *results)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// float result = 0;
// for (int i = 0; i < dataForBlock; i++)
// {
// result += inputData[index * dataForBlock + i];
// }
// result /= dataForBlock;
// results[index] = result;
//}
//void processWithGPU(float *blocks, float *results, unsigned int blockSize, unsigned int blocksCount)
//{
// unsigned int realDataCount = blockSize * blocksCount;
// cudaSetDevice(0);
// float *deviceInputData,
// *deviceResults;
// cudaMalloc( (void**)&deviceInputData, realDataCount * sizeof(float) );
// cudaMalloc( (void**)&deviceResults, realDataCount * sizeof(float) );
// cudaMemcpy( deviceInputData, blocks, realDataCount * sizeof(float), cudaMemcpyHostToDevice );
// findMean<<<1, blocksCount>>>( blockSize, deviceInputData, deviceResults );
// cudaMemcpy( (void*)results, deviceResults, blocksCount * sizeof(float), cudaMemcpyDeviceToHost );
// cudaFree(deviceInputData);
// cudaFree(deviceResults );
//}
//********************************************************************************************************
// Build Image
__global__ void cuBuildImageKernel( float* source, float* dest )
{
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
float* s = source + ( y * (gridDim.x * 2 ) * 2 + x * 2);
float t_[4], t_sume, t_result;
// Save temp values
t_[0] = s[0];
t_[1] = s[1];
t_[2] = s[ gridDim.x * 2];
t_[3] = s[1+gridDim.x * 2];
// Calculate sum
t_sume = t_[0] + t_[1] + t_[2] + t_[3];
// Calculate result
t_result = t_sume * 0.25;
dest[offset] = ( s[ 0 ] +
s[ 1 ] +
s[ gridDim.x * 2 ] +
s[ gridDim.x * 2 + 1] ) * 0.25f;
}
void cuBuildImage( const float* source, int sourceWidth, int sourceHeight,
const float* dest, int destWidth, int destHeight )
{
int sourceBuffLength = sourceWidth * sourceHeight;
int destBuffLength = destWidth * destHeight;
// Reserving memory on GPU
float *sourceBuff,
*destBuff;
hipMalloc( (void**)&sourceBuff, sourceBuffLength * sizeof(float) );
hipMalloc( (void**)&destBuff, destBuffLength * sizeof(float) );
// Copy input buffer
hipMemcpy( sourceBuff, source, sourceBuffLength * sizeof(float), hipMemcpyHostToDevice );
dim3 grid( destWidth, destHeight );
cuBuildImageKernel<<<grid, 1>>>( sourceBuff, destBuff );
hipMemcpy( (void*)dest, destBuff, destBuffLength * sizeof(float), hipMemcpyDeviceToHost );
hipFree( sourceBuff );
hipFree( destBuff );
}
//********************************************************************************************************
// Build Gradient
//__global__ void cuBuildGradientsKernel()
//{
//}
//void cuBuildGradients(const float* )
//{
// const float* img_pt = data.image[level] + width;
// const float* img_pt_max = data.image[level] + width * (height-1);
// float* gradxyii_pt = data.gradients[level] + width;
// // in each iteration i need -1,0,p1,mw,pw
// float val_m1 = *(img_pt-1);
// float val_00 = * img_pt;
// float val_p1;
// for(; img_pt < img_pt_max; img_pt++, gradxyii_pt++)
// {
// val_p1 = *(img_pt+1);
// *( (float*)gradxyii_pt +0) = 0.5f*(val_p1 - val_m1);
// *(((float*)gradxyii_pt)+1) = 0.5f*(*(img_pt+width) - *(img_pt-width));
// *(((float*)gradxyii_pt)+2) = val_00;
// val_m1 = val_00;
// val_00 = val_p1;
// }
//}
//********************************************************************************************************
// Build MaxGradient
//__global__ void cuBuildMaxGradientsKernel()
//{
//}
//void buildMaxGradients(int level)
//{
// float* maxGradTemp = FrameMemory::getInstance().getFloatBuffer(width * height);
// // 1. write abs gradients in real data.
// Eigen::Vector4f* gradxyii_pt = data.gradients[level] + width;
// float* maxgrad_pt = data.maxGradients[level] + width;
// float* maxgrad_pt_max = data.maxGradients[level] + width*(height-1);
// for(; maxgrad_pt < maxgrad_pt_max; maxgrad_pt++, gradxyii_pt++ )
// {
// float dx = *( (float*)gradxyii_pt);
// float dy = *(1+(float*)gradxyii_pt);
// *maxgrad_pt = sqrtf(dx*dx + dy*dy);
// }
// // 2. smear up/down direction into temp buffer
// maxgrad_pt = data.maxGradients[level] + width+1;
// maxgrad_pt_max = data.maxGradients[level] + width*(height-1)-1;
// float* maxgrad_t_pt = maxGradTemp + width+1;
// for(;maxgrad_pt<maxgrad_pt_max; maxgrad_pt++, maxgrad_t_pt++ )
// {
// float g1 = maxgrad_pt[-width];
// float g2 = maxgrad_pt[0];
// if(g1 < g2)
// g1 = g2;
// float g3 = maxgrad_pt[width];
// if(g1 < g3)
// *maxgrad_t_pt = g3;
// else
// *maxgrad_t_pt = g1;
// }
// float numMappablePixels = 0;
// // 2. smear left/right direction into real data
// maxgrad_pt = data.maxGradients[level] + width+1;
// maxgrad_pt_max = data.maxGradients[level] + width*(height-1)-1;
// maxgrad_t_pt = maxGradTemp + width+1;
// for(;maxgrad_pt<maxgrad_pt_max; maxgrad_pt++, maxgrad_t_pt++ )
// {
// float g1 = maxgrad_t_pt[-1];
// float g2 = maxgrad_t_pt[0];
// if(g1 < g2)
// g1 = g2;
// float g3 = maxgrad_t_pt[1];
// if(g1 < g3)
// {
// *maxgrad_pt = g3;
// if(g3 >= MIN_ABS_GRAD_CREATE)
// numMappablePixels++;
// }
// else
// {
// *maxgrad_pt = g1;
// if(g1 >= MIN_ABS_GRAD_CREATE)
// numMappablePixels++;
// }
// }
// if(level==0)
// this->numMappablePixels = numMappablePixels;
// FrameMemory::getInstance().returnBuffer(maxGradTemp);
//}
//********************************************************************************************************
//__global__ void cuBuildIDepthAndIDepthVarKernel()
//{
//}
//// Build IDepth And IDepth Var
//void buildIDepthAndIDepthVar( int level )
//{
// int sw = data.width[level - 1];
// const float* idepthSource = data.idepth [level - 1];
// const float* idepthVarSource = data.idepthVar[level - 1];
// float* idepthDest = data.idepth [level];
// float* idepthVarDest = data.idepthVar[level];
// for( int y = 0; y < height; y++ )
// {
// for( int x = 0; x < width; x++ )
// {
// int idx = 2 * ( x + y * sw );
// int idxDest = ( x + y * width );
// float idepthSumsSum = 0;
// float ivarSumsSum = 0;
// int num = 0;
// // build sums
// float ivar;
// float var = idepthVarSource[idx];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[idx];
// num++;
// }
// var = idepthVarSource[ idx + 1 ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + 1 ];
// num++;
// }
// var = idepthVarSource[ idx + sw ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + sw ];
// num++;
// }
// var = idepthVarSource[ idx + sw + 1 ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + sw + 1 ];
// num++;
// }
// if(num > 0)
// {
// float depth = ivarSumsSum / idepthSumsSum;
// idepthDest [ idxDest ] = 1.0f / depth;
// idepthVarDest[ idxDest ] = num / ivarSumsSum;
// }
// else
// {
// idepthDest [ idxDest ] = -1;
// idepthVarDest[ idxDest ] = -1;
// }
// }
// }
//} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
hipDeviceProp_t deviceProps;
bool hipInit()
{
bool res = true;
int device = 0;
hipError_t err = hipGetDevice( &device );
if( err != hipSuccess )
{
printf("%s\n", hipGetErrorString(err));
res = false;
return res;
}
printf(" Device Count: %d\n", device+1 );
for(int i = 0; i <= device; i++)
{
hipGetDeviceProperties(&deviceProps, device);
printf(" Device Number: %d\n", i);
printf(" Device name: %s\n", deviceProps.name);
printf(" Memory Clock Rate (KHz): %d\n", deviceProps.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", deviceProps.memoryBusWidth );
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*deviceProps.memoryClockRate*(deviceProps.memoryBusWidth/8)/1.0e6);
printf(" Max Threads per Block: %d\n", deviceProps.maxThreadsPerBlock );
}
// Select CUDA device
hipSetDevice(0);
return res;
}
//******************************************************************************************************
// !!! Exemple DO NOT TOCH !!!!!
//__global__ void findMean(unsigned int dataForBlock, float *inputData, float *results)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// float result = 0;
// for (int i = 0; i < dataForBlock; i++)
// {
// result += inputData[index * dataForBlock + i];
// }
// result /= dataForBlock;
// results[index] = result;
//}
//void processWithGPU(float *blocks, float *results, unsigned int blockSize, unsigned int blocksCount)
//{
// unsigned int realDataCount = blockSize * blocksCount;
// cudaSetDevice(0);
// float *deviceInputData,
// *deviceResults;
// cudaMalloc( (void**)&deviceInputData, realDataCount * sizeof(float) );
// cudaMalloc( (void**)&deviceResults, realDataCount * sizeof(float) );
// cudaMemcpy( deviceInputData, blocks, realDataCount * sizeof(float), cudaMemcpyHostToDevice );
// findMean<<<1, blocksCount>>>( blockSize, deviceInputData, deviceResults );
// cudaMemcpy( (void*)results, deviceResults, blocksCount * sizeof(float), cudaMemcpyDeviceToHost );
// cudaFree(deviceInputData);
// cudaFree(deviceResults );
//}
//********************************************************************************************************
// Build Image
__global__ void cuBuildImageKernel( float* source, float* dest )
{
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
float* s = source + ( y * (gridDim.x * 2 ) * 2 + x * 2);
float t_[4], t_sume, t_result;
// Save temp values
t_[0] = s[0];
t_[1] = s[1];
t_[2] = s[ gridDim.x * 2];
t_[3] = s[1+gridDim.x * 2];
// Calculate sum
t_sume = t_[0] + t_[1] + t_[2] + t_[3];
// Calculate result
t_result = t_sume * 0.25;
dest[offset] = ( s[ 0 ] +
s[ 1 ] +
s[ gridDim.x * 2 ] +
s[ gridDim.x * 2 + 1] ) * 0.25f;
}
void cuBuildImage( const float* source, int sourceWidth, int sourceHeight,
const float* dest, int destWidth, int destHeight )
{
int sourceBuffLength = sourceWidth * sourceHeight;
int destBuffLength = destWidth * destHeight;
// Reserving memory on GPU
float *sourceBuff,
*destBuff;
hipMalloc( (void**)&sourceBuff, sourceBuffLength * sizeof(float) );
hipMalloc( (void**)&destBuff, destBuffLength * sizeof(float) );
// Copy input buffer
hipMemcpy( sourceBuff, source, sourceBuffLength * sizeof(float), hipMemcpyHostToDevice );
dim3 grid( destWidth, destHeight );
cuBuildImageKernel<<<grid, 1>>>( sourceBuff, destBuff );
hipMemcpy( (void*)dest, destBuff, destBuffLength * sizeof(float), hipMemcpyDeviceToHost );
hipFree( sourceBuff );
hipFree( destBuff );
}
//********************************************************************************************************
// Build Gradient
//__global__ void cuBuildGradientsKernel()
//{
//}
//void cuBuildGradients(const float* )
//{
// const float* img_pt = data.image[level] + width;
// const float* img_pt_max = data.image[level] + width * (height-1);
// float* gradxyii_pt = data.gradients[level] + width;
// // in each iteration i need -1,0,p1,mw,pw
// float val_m1 = *(img_pt-1);
// float val_00 = * img_pt;
// float val_p1;
// for(; img_pt < img_pt_max; img_pt++, gradxyii_pt++)
// {
// val_p1 = *(img_pt+1);
// *( (float*)gradxyii_pt +0) = 0.5f*(val_p1 - val_m1);
// *(((float*)gradxyii_pt)+1) = 0.5f*(*(img_pt+width) - *(img_pt-width));
// *(((float*)gradxyii_pt)+2) = val_00;
// val_m1 = val_00;
// val_00 = val_p1;
// }
//}
//********************************************************************************************************
// Build MaxGradient
//__global__ void cuBuildMaxGradientsKernel()
//{
//}
//void buildMaxGradients(int level)
//{
// float* maxGradTemp = FrameMemory::getInstance().getFloatBuffer(width * height);
// // 1. write abs gradients in real data.
// Eigen::Vector4f* gradxyii_pt = data.gradients[level] + width;
// float* maxgrad_pt = data.maxGradients[level] + width;
// float* maxgrad_pt_max = data.maxGradients[level] + width*(height-1);
// for(; maxgrad_pt < maxgrad_pt_max; maxgrad_pt++, gradxyii_pt++ )
// {
// float dx = *( (float*)gradxyii_pt);
// float dy = *(1+(float*)gradxyii_pt);
// *maxgrad_pt = sqrtf(dx*dx + dy*dy);
// }
// // 2. smear up/down direction into temp buffer
// maxgrad_pt = data.maxGradients[level] + width+1;
// maxgrad_pt_max = data.maxGradients[level] + width*(height-1)-1;
// float* maxgrad_t_pt = maxGradTemp + width+1;
// for(;maxgrad_pt<maxgrad_pt_max; maxgrad_pt++, maxgrad_t_pt++ )
// {
// float g1 = maxgrad_pt[-width];
// float g2 = maxgrad_pt[0];
// if(g1 < g2)
// g1 = g2;
// float g3 = maxgrad_pt[width];
// if(g1 < g3)
// *maxgrad_t_pt = g3;
// else
// *maxgrad_t_pt = g1;
// }
// float numMappablePixels = 0;
// // 2. smear left/right direction into real data
// maxgrad_pt = data.maxGradients[level] + width+1;
// maxgrad_pt_max = data.maxGradients[level] + width*(height-1)-1;
// maxgrad_t_pt = maxGradTemp + width+1;
// for(;maxgrad_pt<maxgrad_pt_max; maxgrad_pt++, maxgrad_t_pt++ )
// {
// float g1 = maxgrad_t_pt[-1];
// float g2 = maxgrad_t_pt[0];
// if(g1 < g2)
// g1 = g2;
// float g3 = maxgrad_t_pt[1];
// if(g1 < g3)
// {
// *maxgrad_pt = g3;
// if(g3 >= MIN_ABS_GRAD_CREATE)
// numMappablePixels++;
// }
// else
// {
// *maxgrad_pt = g1;
// if(g1 >= MIN_ABS_GRAD_CREATE)
// numMappablePixels++;
// }
// }
// if(level==0)
// this->numMappablePixels = numMappablePixels;
// FrameMemory::getInstance().returnBuffer(maxGradTemp);
//}
//********************************************************************************************************
//__global__ void cuBuildIDepthAndIDepthVarKernel()
//{
//}
//// Build IDepth And IDepth Var
//void buildIDepthAndIDepthVar( int level )
//{
// int sw = data.width[level - 1];
// const float* idepthSource = data.idepth [level - 1];
// const float* idepthVarSource = data.idepthVar[level - 1];
// float* idepthDest = data.idepth [level];
// float* idepthVarDest = data.idepthVar[level];
// for( int y = 0; y < height; y++ )
// {
// for( int x = 0; x < width; x++ )
// {
// int idx = 2 * ( x + y * sw );
// int idxDest = ( x + y * width );
// float idepthSumsSum = 0;
// float ivarSumsSum = 0;
// int num = 0;
// // build sums
// float ivar;
// float var = idepthVarSource[idx];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[idx];
// num++;
// }
// var = idepthVarSource[ idx + 1 ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + 1 ];
// num++;
// }
// var = idepthVarSource[ idx + sw ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + sw ];
// num++;
// }
// var = idepthVarSource[ idx + sw + 1 ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + sw + 1 ];
// num++;
// }
// if(num > 0)
// {
// float depth = ivarSumsSum / idepthSumsSum;
// idepthDest [ idxDest ] = 1.0f / depth;
// idepthVarDest[ idxDest ] = num / ivarSumsSum;
// }
// else
// {
// idepthDest [ idxDest ] = -1;
// idepthVarDest[ idxDest ] = -1;
// }
// }
// }
//} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z18cuBuildImageKernelPfS_
.globl _Z18cuBuildImageKernelPfS_
.p2align 8
.type _Z18cuBuildImageKernelPfS_,@function
_Z18cuBuildImageKernelPfS_:
s_clause 0x1
s_load_b32 s8, s[0:1], 0x10
s_load_b128 s[0:3], s[0:1], 0x0
s_mov_b32 s7, 0
v_mov_b32_e32 v1, 0
s_waitcnt lgkmcnt(0)
s_lshl_b32 s4, s8, 1
s_mul_i32 s10, s8, s15
s_mul_i32 s5, s4, s15
s_add_i32 s10, s10, s14
s_add_i32 s6, s5, s14
s_mov_b32 s5, s7
s_lshl_b32 s6, s6, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[8:9], s[6:7], 2
s_add_u32 s0, s0, s8
s_addc_u32 s1, s1, s9
s_lshl_b64 s[8:9], s[4:5], 2
s_load_b64 s[12:13], s[0:1], 0x0
s_add_u32 s8, s0, s8
s_addc_u32 s9, s1, s9
s_or_b32 s6, s4, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[4:5], s[6:7], 2
s_load_b32 s6, s[8:9], 0x0
s_add_u32 s0, s0, s4
s_addc_u32 s1, s1, s5
s_ashr_i32 s11, s10, 31
s_load_b32 s0, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_add_f32_e64 v0, s12, s13
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v0, s6, v0
v_add_f32_e32 v0, s0, v0
s_lshl_b64 s[0:1], s[10:11], 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
s_add_u32 s0, s2, s0
s_addc_u32 s1, s3, s1
v_mul_f32_e32 v0, 0x3e800000, v0
global_store_b32 v1, v0, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z18cuBuildImageKernelPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 16
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z18cuBuildImageKernelPfS_, .Lfunc_end0-_Z18cuBuildImageKernelPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z18cuBuildImageKernelPfS_
.private_segment_fixed_size: 0
.sgpr_count: 16
.sgpr_spill_count: 0
.symbol: _Z18cuBuildImageKernelPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
hipDeviceProp_t deviceProps;
bool hipInit()
{
bool res = true;
int device = 0;
hipError_t err = hipGetDevice( &device );
if( err != hipSuccess )
{
printf("%s\n", hipGetErrorString(err));
res = false;
return res;
}
printf(" Device Count: %d\n", device+1 );
for(int i = 0; i <= device; i++)
{
hipGetDeviceProperties(&deviceProps, device);
printf(" Device Number: %d\n", i);
printf(" Device name: %s\n", deviceProps.name);
printf(" Memory Clock Rate (KHz): %d\n", deviceProps.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", deviceProps.memoryBusWidth );
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*deviceProps.memoryClockRate*(deviceProps.memoryBusWidth/8)/1.0e6);
printf(" Max Threads per Block: %d\n", deviceProps.maxThreadsPerBlock );
}
// Select CUDA device
hipSetDevice(0);
return res;
}
//******************************************************************************************************
// !!! Exemple DO NOT TOCH !!!!!
//__global__ void findMean(unsigned int dataForBlock, float *inputData, float *results)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// float result = 0;
// for (int i = 0; i < dataForBlock; i++)
// {
// result += inputData[index * dataForBlock + i];
// }
// result /= dataForBlock;
// results[index] = result;
//}
//void processWithGPU(float *blocks, float *results, unsigned int blockSize, unsigned int blocksCount)
//{
// unsigned int realDataCount = blockSize * blocksCount;
// cudaSetDevice(0);
// float *deviceInputData,
// *deviceResults;
// cudaMalloc( (void**)&deviceInputData, realDataCount * sizeof(float) );
// cudaMalloc( (void**)&deviceResults, realDataCount * sizeof(float) );
// cudaMemcpy( deviceInputData, blocks, realDataCount * sizeof(float), cudaMemcpyHostToDevice );
// findMean<<<1, blocksCount>>>( blockSize, deviceInputData, deviceResults );
// cudaMemcpy( (void*)results, deviceResults, blocksCount * sizeof(float), cudaMemcpyDeviceToHost );
// cudaFree(deviceInputData);
// cudaFree(deviceResults );
//}
//********************************************************************************************************
// Build Image
__global__ void cuBuildImageKernel( float* source, float* dest )
{
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
float* s = source + ( y * (gridDim.x * 2 ) * 2 + x * 2);
float t_[4], t_sume, t_result;
// Save temp values
t_[0] = s[0];
t_[1] = s[1];
t_[2] = s[ gridDim.x * 2];
t_[3] = s[1+gridDim.x * 2];
// Calculate sum
t_sume = t_[0] + t_[1] + t_[2] + t_[3];
// Calculate result
t_result = t_sume * 0.25;
dest[offset] = ( s[ 0 ] +
s[ 1 ] +
s[ gridDim.x * 2 ] +
s[ gridDim.x * 2 + 1] ) * 0.25f;
}
void cuBuildImage( const float* source, int sourceWidth, int sourceHeight,
const float* dest, int destWidth, int destHeight )
{
int sourceBuffLength = sourceWidth * sourceHeight;
int destBuffLength = destWidth * destHeight;
// Reserving memory on GPU
float *sourceBuff,
*destBuff;
hipMalloc( (void**)&sourceBuff, sourceBuffLength * sizeof(float) );
hipMalloc( (void**)&destBuff, destBuffLength * sizeof(float) );
// Copy input buffer
hipMemcpy( sourceBuff, source, sourceBuffLength * sizeof(float), hipMemcpyHostToDevice );
dim3 grid( destWidth, destHeight );
cuBuildImageKernel<<<grid, 1>>>( sourceBuff, destBuff );
hipMemcpy( (void*)dest, destBuff, destBuffLength * sizeof(float), hipMemcpyDeviceToHost );
hipFree( sourceBuff );
hipFree( destBuff );
}
//********************************************************************************************************
// Build Gradient
//__global__ void cuBuildGradientsKernel()
//{
//}
//void cuBuildGradients(const float* )
//{
// const float* img_pt = data.image[level] + width;
// const float* img_pt_max = data.image[level] + width * (height-1);
// float* gradxyii_pt = data.gradients[level] + width;
// // in each iteration i need -1,0,p1,mw,pw
// float val_m1 = *(img_pt-1);
// float val_00 = * img_pt;
// float val_p1;
// for(; img_pt < img_pt_max; img_pt++, gradxyii_pt++)
// {
// val_p1 = *(img_pt+1);
// *( (float*)gradxyii_pt +0) = 0.5f*(val_p1 - val_m1);
// *(((float*)gradxyii_pt)+1) = 0.5f*(*(img_pt+width) - *(img_pt-width));
// *(((float*)gradxyii_pt)+2) = val_00;
// val_m1 = val_00;
// val_00 = val_p1;
// }
//}
//********************************************************************************************************
// Build MaxGradient
//__global__ void cuBuildMaxGradientsKernel()
//{
//}
//void buildMaxGradients(int level)
//{
// float* maxGradTemp = FrameMemory::getInstance().getFloatBuffer(width * height);
// // 1. write abs gradients in real data.
// Eigen::Vector4f* gradxyii_pt = data.gradients[level] + width;
// float* maxgrad_pt = data.maxGradients[level] + width;
// float* maxgrad_pt_max = data.maxGradients[level] + width*(height-1);
// for(; maxgrad_pt < maxgrad_pt_max; maxgrad_pt++, gradxyii_pt++ )
// {
// float dx = *( (float*)gradxyii_pt);
// float dy = *(1+(float*)gradxyii_pt);
// *maxgrad_pt = sqrtf(dx*dx + dy*dy);
// }
// // 2. smear up/down direction into temp buffer
// maxgrad_pt = data.maxGradients[level] + width+1;
// maxgrad_pt_max = data.maxGradients[level] + width*(height-1)-1;
// float* maxgrad_t_pt = maxGradTemp + width+1;
// for(;maxgrad_pt<maxgrad_pt_max; maxgrad_pt++, maxgrad_t_pt++ )
// {
// float g1 = maxgrad_pt[-width];
// float g2 = maxgrad_pt[0];
// if(g1 < g2)
// g1 = g2;
// float g3 = maxgrad_pt[width];
// if(g1 < g3)
// *maxgrad_t_pt = g3;
// else
// *maxgrad_t_pt = g1;
// }
// float numMappablePixels = 0;
// // 2. smear left/right direction into real data
// maxgrad_pt = data.maxGradients[level] + width+1;
// maxgrad_pt_max = data.maxGradients[level] + width*(height-1)-1;
// maxgrad_t_pt = maxGradTemp + width+1;
// for(;maxgrad_pt<maxgrad_pt_max; maxgrad_pt++, maxgrad_t_pt++ )
// {
// float g1 = maxgrad_t_pt[-1];
// float g2 = maxgrad_t_pt[0];
// if(g1 < g2)
// g1 = g2;
// float g3 = maxgrad_t_pt[1];
// if(g1 < g3)
// {
// *maxgrad_pt = g3;
// if(g3 >= MIN_ABS_GRAD_CREATE)
// numMappablePixels++;
// }
// else
// {
// *maxgrad_pt = g1;
// if(g1 >= MIN_ABS_GRAD_CREATE)
// numMappablePixels++;
// }
// }
// if(level==0)
// this->numMappablePixels = numMappablePixels;
// FrameMemory::getInstance().returnBuffer(maxGradTemp);
//}
//********************************************************************************************************
//__global__ void cuBuildIDepthAndIDepthVarKernel()
//{
//}
//// Build IDepth And IDepth Var
//void buildIDepthAndIDepthVar( int level )
//{
// int sw = data.width[level - 1];
// const float* idepthSource = data.idepth [level - 1];
// const float* idepthVarSource = data.idepthVar[level - 1];
// float* idepthDest = data.idepth [level];
// float* idepthVarDest = data.idepthVar[level];
// for( int y = 0; y < height; y++ )
// {
// for( int x = 0; x < width; x++ )
// {
// int idx = 2 * ( x + y * sw );
// int idxDest = ( x + y * width );
// float idepthSumsSum = 0;
// float ivarSumsSum = 0;
// int num = 0;
// // build sums
// float ivar;
// float var = idepthVarSource[idx];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[idx];
// num++;
// }
// var = idepthVarSource[ idx + 1 ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + 1 ];
// num++;
// }
// var = idepthVarSource[ idx + sw ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + sw ];
// num++;
// }
// var = idepthVarSource[ idx + sw + 1 ];
// if( var > 0 )
// {
// ivar = 1.0f / var;
// ivarSumsSum += ivar;
// idepthSumsSum += ivar * idepthSource[ idx + sw + 1 ];
// num++;
// }
// if(num > 0)
// {
// float depth = ivarSumsSum / idepthSumsSum;
// idepthDest [ idxDest ] = 1.0f / depth;
// idepthVarDest[ idxDest ] = num / ivarSumsSum;
// }
// else
// {
// idepthDest [ idxDest ] = -1;
// idepthVarDest[ idxDest ] = -1;
// }
// }
// }
//} | .text
.file "gpu_processor.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z7hipInitv
.LCPI0_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl _Z7hipInitv
.p2align 4, 0x90
.type _Z7hipInitv,@function
_Z7hipInitv: # @_Z7hipInitv
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %rbp, -16
movl $0, 4(%rsp)
leaq 4(%rsp), %rdi
callq hipGetDevice
movl %eax, %ebx
testl %eax, %eax
je .LBB0_2
# %bb.1:
movl %ebx, %edi
callq hipGetErrorString
movq %rax, %rdi
callq puts@PLT
jmp .LBB0_6
.LBB0_2:
movl 4(%rsp), %esi
incl %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
movl 4(%rsp), %esi
testl %esi, %esi
js .LBB0_5
# %bb.3: # %.lr.ph.preheader
movl $-1, %ebp
.p2align 4, 0x90
.LBB0_4: # %.lr.ph
# =>This Inner Loop Header: Depth=1
incl %ebp
movl $deviceProps, %edi
callq hipGetDevicePropertiesR0600
movl $.L.str.2, %edi
movl %ebp, %esi
xorl %eax, %eax
callq printf
movl $.L.str.3, %edi
movl $deviceProps, %esi
xorl %eax, %eax
callq printf
movl deviceProps+608(%rip), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
movl deviceProps+612(%rip), %esi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
xorps %xmm1, %xmm1
cvtsi2sdl deviceProps+608(%rip), %xmm1
addsd %xmm1, %xmm1
movl deviceProps+612(%rip), %eax
leal 7(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $3, %ecx
xorps %xmm0, %xmm0
cvtsi2sd %ecx, %xmm0
mulsd %xmm1, %xmm0
divsd .LCPI0_0(%rip), %xmm0
movl $.L.str.6, %edi
movb $1, %al
callq printf
movl deviceProps+320(%rip), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movl 4(%rsp), %esi
cmpl %esi, %ebp
jl .LBB0_4
.LBB0_5: # %._crit_edge
xorl %edi, %edi
callq hipSetDevice
.LBB0_6:
testl %ebx, %ebx
sete %al
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z7hipInitv, .Lfunc_end0-_Z7hipInitv
.cfi_endproc
# -- End function
.globl _Z33__device_stub__cuBuildImageKernelPfS_ # -- Begin function _Z33__device_stub__cuBuildImageKernelPfS_
.p2align 4, 0x90
.type _Z33__device_stub__cuBuildImageKernelPfS_,@function
_Z33__device_stub__cuBuildImageKernelPfS_: # @_Z33__device_stub__cuBuildImageKernelPfS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z18cuBuildImageKernelPfS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z33__device_stub__cuBuildImageKernelPfS_, .Lfunc_end1-_Z33__device_stub__cuBuildImageKernelPfS_
.cfi_endproc
# -- End function
.globl _Z12cuBuildImagePKfiiS0_ii # -- Begin function _Z12cuBuildImagePKfiiS0_ii
.p2align 4, 0x90
.type _Z12cuBuildImagePKfiiS0_ii,@function
_Z12cuBuildImagePKfiiS0_ii: # @_Z12cuBuildImagePKfiiS0_ii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $104, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %r15d
movl %r8d, %ebp
movq %rcx, %rbx
movq %rdi, %r12
imull %edx, %esi
movl %r9d, %r14d
imull %r8d, %r14d
movslq %esi, %r13
shlq $2, %r13
leaq 8(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
movslq %r14d, %r14
shlq $2, %r14
movq %rsp, %rdi
movq %r14, %rsi
callq hipMalloc
movq 8(%rsp), %rdi
movq %r12, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
movl %ebp, %eax
shlq $32, %r15
orq %rax, %r15
movabsq $4294967297, %rdx # imm = 0x100000001
movq %r15, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movq 8(%rsp), %rax
movq (%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z18cuBuildImageKernelPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_2:
movq (%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
addq $104, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z12cuBuildImagePKfiiS0_ii, .Lfunc_end2-_Z12cuBuildImagePKfiiS0_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z18cuBuildImageKernelPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type deviceProps,@object # @deviceProps
.bss
.globl deviceProps
.p2align 3, 0x0
deviceProps:
.zero 1472
.size deviceProps, 1472
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz " Device Count: %d\n"
.size .L.str.1, 20
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz " Device Number: %d\n"
.size .L.str.2, 21
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz " Device name: %s\n"
.size .L.str.3, 19
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz " Memory Clock Rate (KHz): %d\n"
.size .L.str.4, 31
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz " Memory Bus Width (bits): %d\n"
.size .L.str.5, 31
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz " Peak Memory Bandwidth (GB/s): %f\n\n"
.size .L.str.6, 37
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz " Max Threads per Block: %d\n"
.size .L.str.7, 29
.type _Z18cuBuildImageKernelPfS_,@object # @_Z18cuBuildImageKernelPfS_
.section .rodata,"a",@progbits
.globl _Z18cuBuildImageKernelPfS_
.p2align 3, 0x0
_Z18cuBuildImageKernelPfS_:
.quad _Z33__device_stub__cuBuildImageKernelPfS_
.size _Z18cuBuildImageKernelPfS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z18cuBuildImageKernelPfS_"
.size .L__unnamed_1, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z33__device_stub__cuBuildImageKernelPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym deviceProps
.addrsig_sym _Z18cuBuildImageKernelPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z18cuBuildImageKernelPfS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ MOV R5, c[0x0][0xc] ; /* 0x0000030000057a02 */
/* 0x000fe20000000f00 */
/*0030*/ HFMA2.MMA R15, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0f7435 */
/* 0x000fe200000001ff */
/*0040*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0050*/ S2R R13, SR_CTAID.Y ; /* 0x00000000000d7919 */
/* 0x000e220000002600 */
/*0060*/ SHF.L.U32 R5, R5, 0x1, RZ ; /* 0x0000000105057819 */
/* 0x000fca00000006ff */
/*0070*/ IMAD R0, R5, R13, R6 ; /* 0x0000000d05007224 */
/* 0x001fca00078e0206 */
/*0080*/ SHF.L.U32 R0, R0, 0x1, RZ ; /* 0x0000000100007819 */
/* 0x000fca00000006ff */
/*0090*/ IMAD.WIDE.U32 R2, R0, R15, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e000f */
/*00a0*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea2000c1e1900 */
/*00b0*/ IMAD.WIDE.U32 R4, R5, 0x4, R2 ; /* 0x0000000405047825 */
/* 0x000fc600078e0002 */
/*00c0*/ LDG.E R7, [R2.64+0x4] ; /* 0x0000040402077981 */
/* 0x000ea8000c1e1900 */
/*00d0*/ LDG.E R9, [R4.64] ; /* 0x0000000404097981 */
/* 0x000ee8000c1e1900 */
/*00e0*/ LDG.E R11, [R4.64+0x4] ; /* 0x00000404040b7981 */
/* 0x000f22000c1e1900 */
/*00f0*/ IMAD R6, R13, c[0x0][0xc], R6 ; /* 0x000003000d067a24 */
/* 0x000fe400078e0206 */
/*0100*/ FADD R0, R0, R7 ; /* 0x0000000700007221 */
/* 0x004fc40000000000 */
/*0110*/ IMAD.WIDE R6, R6, R15, c[0x0][0x168] ; /* 0x00005a0006067625 */
/* 0x000fc800078e020f */
/*0120*/ FADD R0, R0, R9 ; /* 0x0000000900007221 */
/* 0x008fc80000000000 */
/*0130*/ FADD R0, R0, R11 ; /* 0x0000000b00007221 */
/* 0x010fc80000000000 */
/*0140*/ FMUL R9, R0, 0.25 ; /* 0x3e80000000097820 */
/* 0x000fca0000400000 */
/*0150*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0160*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0170*/ BRA 0x170; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z18cuBuildImageKernelPfS_
.globl _Z18cuBuildImageKernelPfS_
.p2align 8
.type _Z18cuBuildImageKernelPfS_,@function
_Z18cuBuildImageKernelPfS_:
s_clause 0x1
s_load_b32 s8, s[0:1], 0x10
s_load_b128 s[0:3], s[0:1], 0x0
s_mov_b32 s7, 0
v_mov_b32_e32 v1, 0
s_waitcnt lgkmcnt(0)
s_lshl_b32 s4, s8, 1
s_mul_i32 s10, s8, s15
s_mul_i32 s5, s4, s15
s_add_i32 s10, s10, s14
s_add_i32 s6, s5, s14
s_mov_b32 s5, s7
s_lshl_b32 s6, s6, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[8:9], s[6:7], 2
s_add_u32 s0, s0, s8
s_addc_u32 s1, s1, s9
s_lshl_b64 s[8:9], s[4:5], 2
s_load_b64 s[12:13], s[0:1], 0x0
s_add_u32 s8, s0, s8
s_addc_u32 s9, s1, s9
s_or_b32 s6, s4, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[4:5], s[6:7], 2
s_load_b32 s6, s[8:9], 0x0
s_add_u32 s0, s0, s4
s_addc_u32 s1, s1, s5
s_ashr_i32 s11, s10, 31
s_load_b32 s0, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_add_f32_e64 v0, s12, s13
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v0, s6, v0
v_add_f32_e32 v0, s0, v0
s_lshl_b64 s[0:1], s[10:11], 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
s_add_u32 s0, s2, s0
s_addc_u32 s1, s3, s1
v_mul_f32_e32 v0, 0x3e800000, v0
global_store_b32 v1, v0, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z18cuBuildImageKernelPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 16
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z18cuBuildImageKernelPfS_, .Lfunc_end0-_Z18cuBuildImageKernelPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z18cuBuildImageKernelPfS_
.private_segment_fixed_size: 0
.sgpr_count: 16
.sgpr_spill_count: 0
.symbol: _Z18cuBuildImageKernelPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000bf00e_00000000-6_gpu_processor.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%s\n"
.LC1:
.string " Device Count: %d\n"
.LC2:
.string " Device Number: %d\n"
.LC3:
.string " Device name: %s\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string " Memory Clock Rate (KHz): %d\n"
.align 8
.LC5:
.string " Memory Bus Width (bits): %d\n"
.align 8
.LC7:
.string " Peak Memory Bandwidth (GB/s): %f\n\n"
.section .rodata.str1.1
.LC8:
.string " Max Threads per Block: %d\n"
.text
.globl _Z6cuInitv
.type _Z6cuInitv, @function
_Z6cuInitv:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $24, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
movl $0, 4(%rsp)
leaq 4(%rsp), %rdi
call cudaGetDevice@PLT
testl %eax, %eax
jne .L11
movl 4(%rsp), %eax
leal 1(%rax), %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 4(%rsp), %esi
testl %esi, %esi
js .L6
movl $0, %ebp
leaq deviceProps(%rip), %rbx
leaq .LC2(%rip), %r13
leaq .LC3(%rip), %r12
.L7:
movq %rbx, %rdi
call cudaGetDeviceProperties_v2@PLT
movl %ebp, %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rdx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 608(%rbx), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 612(%rbx), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtsi2sdl 608(%rbx), %xmm0
addsd %xmm0, %xmm0
movl 612(%rbx), %edx
leal 7(%rdx), %eax
testl %edx, %edx
cmovns %edx, %eax
sarl $3, %eax
pxor %xmm1, %xmm1
cvtsi2sdl %eax, %xmm1
mulsd %xmm1, %xmm0
divsd .LC6(%rip), %xmm0
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl 320(%rbx), %edx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebp
movl 4(%rsp), %esi
cmpl %ebp, %esi
jge .L7
.L6:
movl $0, %edi
call cudaSetDevice@PLT
movl $1, %eax
.L3:
movq 8(%rsp), %rdx
subq %fs:40, %rdx
jne .L12
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
jmp .L3
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z6cuInitv, .-_Z6cuInitv
.globl _Z40__device_stub__Z18cuBuildImageKernelPfS_PfS_
.type _Z40__device_stub__Z18cuBuildImageKernelPfS_PfS_, @function
_Z40__device_stub__Z18cuBuildImageKernelPfS_PfS_:
.LFB2083:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z18cuBuildImageKernelPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z40__device_stub__Z18cuBuildImageKernelPfS_PfS_, .-_Z40__device_stub__Z18cuBuildImageKernelPfS_PfS_
.globl _Z18cuBuildImageKernelPfS_
.type _Z18cuBuildImageKernelPfS_, @function
_Z18cuBuildImageKernelPfS_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z18cuBuildImageKernelPfS_PfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z18cuBuildImageKernelPfS_, .-_Z18cuBuildImageKernelPfS_
.globl _Z12cuBuildImagePKfiiS0_ii
.type _Z12cuBuildImagePKfiiS0_ii, @function
_Z12cuBuildImagePKfiiS0_ii:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r15
movq %rcx, %r14
movl %r8d, %r13d
movl %r9d, %r12d
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl %r8d, %ebp
imull %r9d, %ebp
imull %edx, %esi
movslq %esi, %rbx
salq $2, %rbx
movq %rsp, %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movslq %ebp, %rbp
salq $2, %rbp
leaq 8(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r15, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl %r13d, 16(%rsp)
movl %r12d, 20(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L22:
movl $2, %ecx
movq %rbp, %rdx
movq 8(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z40__device_stub__Z18cuBuildImageKernelPfS_PfS_
jmp .L22
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z12cuBuildImagePKfiiS0_ii, .-_Z12cuBuildImagePKfiiS0_ii
.section .rodata.str1.1
.LC9:
.string "_Z18cuBuildImageKernelPfS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _Z18cuBuildImageKernelPfS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl deviceProps
.bss
.align 32
.type deviceProps, @object
.size deviceProps, 1032
deviceProps:
.zero 1032
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC6:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "gpu_processor.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z7hipInitv
.LCPI0_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl _Z7hipInitv
.p2align 4, 0x90
.type _Z7hipInitv,@function
_Z7hipInitv: # @_Z7hipInitv
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %rbp, -16
movl $0, 4(%rsp)
leaq 4(%rsp), %rdi
callq hipGetDevice
movl %eax, %ebx
testl %eax, %eax
je .LBB0_2
# %bb.1:
movl %ebx, %edi
callq hipGetErrorString
movq %rax, %rdi
callq puts@PLT
jmp .LBB0_6
.LBB0_2:
movl 4(%rsp), %esi
incl %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
movl 4(%rsp), %esi
testl %esi, %esi
js .LBB0_5
# %bb.3: # %.lr.ph.preheader
movl $-1, %ebp
.p2align 4, 0x90
.LBB0_4: # %.lr.ph
# =>This Inner Loop Header: Depth=1
incl %ebp
movl $deviceProps, %edi
callq hipGetDevicePropertiesR0600
movl $.L.str.2, %edi
movl %ebp, %esi
xorl %eax, %eax
callq printf
movl $.L.str.3, %edi
movl $deviceProps, %esi
xorl %eax, %eax
callq printf
movl deviceProps+608(%rip), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
movl deviceProps+612(%rip), %esi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
xorps %xmm1, %xmm1
cvtsi2sdl deviceProps+608(%rip), %xmm1
addsd %xmm1, %xmm1
movl deviceProps+612(%rip), %eax
leal 7(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $3, %ecx
xorps %xmm0, %xmm0
cvtsi2sd %ecx, %xmm0
mulsd %xmm1, %xmm0
divsd .LCPI0_0(%rip), %xmm0
movl $.L.str.6, %edi
movb $1, %al
callq printf
movl deviceProps+320(%rip), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movl 4(%rsp), %esi
cmpl %esi, %ebp
jl .LBB0_4
.LBB0_5: # %._crit_edge
xorl %edi, %edi
callq hipSetDevice
.LBB0_6:
testl %ebx, %ebx
sete %al
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z7hipInitv, .Lfunc_end0-_Z7hipInitv
.cfi_endproc
# -- End function
.globl _Z33__device_stub__cuBuildImageKernelPfS_ # -- Begin function _Z33__device_stub__cuBuildImageKernelPfS_
.p2align 4, 0x90
.type _Z33__device_stub__cuBuildImageKernelPfS_,@function
_Z33__device_stub__cuBuildImageKernelPfS_: # @_Z33__device_stub__cuBuildImageKernelPfS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z18cuBuildImageKernelPfS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z33__device_stub__cuBuildImageKernelPfS_, .Lfunc_end1-_Z33__device_stub__cuBuildImageKernelPfS_
.cfi_endproc
# -- End function
.globl _Z12cuBuildImagePKfiiS0_ii # -- Begin function _Z12cuBuildImagePKfiiS0_ii
.p2align 4, 0x90
.type _Z12cuBuildImagePKfiiS0_ii,@function
_Z12cuBuildImagePKfiiS0_ii: # @_Z12cuBuildImagePKfiiS0_ii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $104, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %r15d
movl %r8d, %ebp
movq %rcx, %rbx
movq %rdi, %r12
imull %edx, %esi
movl %r9d, %r14d
imull %r8d, %r14d
movslq %esi, %r13
shlq $2, %r13
leaq 8(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
movslq %r14d, %r14
shlq $2, %r14
movq %rsp, %rdi
movq %r14, %rsi
callq hipMalloc
movq 8(%rsp), %rdi
movq %r12, %rsi
movq %r13, %rdx
movl $1, %ecx
callq hipMemcpy
movl %ebp, %eax
shlq $32, %r15
orq %rax, %r15
movabsq $4294967297, %rdx # imm = 0x100000001
movq %r15, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movq 8(%rsp), %rax
movq (%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z18cuBuildImageKernelPfS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_2:
movq (%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
addq $104, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z12cuBuildImagePKfiiS0_ii, .Lfunc_end2-_Z12cuBuildImagePKfiiS0_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z18cuBuildImageKernelPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type deviceProps,@object # @deviceProps
.bss
.globl deviceProps
.p2align 3, 0x0
deviceProps:
.zero 1472
.size deviceProps, 1472
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz " Device Count: %d\n"
.size .L.str.1, 20
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz " Device Number: %d\n"
.size .L.str.2, 21
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz " Device name: %s\n"
.size .L.str.3, 19
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz " Memory Clock Rate (KHz): %d\n"
.size .L.str.4, 31
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz " Memory Bus Width (bits): %d\n"
.size .L.str.5, 31
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz " Peak Memory Bandwidth (GB/s): %f\n\n"
.size .L.str.6, 37
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz " Max Threads per Block: %d\n"
.size .L.str.7, 29
.type _Z18cuBuildImageKernelPfS_,@object # @_Z18cuBuildImageKernelPfS_
.section .rodata,"a",@progbits
.globl _Z18cuBuildImageKernelPfS_
.p2align 3, 0x0
_Z18cuBuildImageKernelPfS_:
.quad _Z33__device_stub__cuBuildImageKernelPfS_
.size _Z18cuBuildImageKernelPfS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z18cuBuildImageKernelPfS_"
.size .L__unnamed_1, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z33__device_stub__cuBuildImageKernelPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym deviceProps
.addrsig_sym _Z18cuBuildImageKernelPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
#define N 4096
#define block_Size 256
/* function to integrate, defined as a function on the GPU device */
__device__ float myfunction(float a)
{
return a*a+2.0*a + 3.0;
}
/* kernel function to compute the summation used in the trapezoidal rule
for numerical integration */
__global__ void integratorKernel(float *a, float start, float deltaX)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id<N)
{
float x = start + (float)id * deltaX;
atomicAdd(a, myfunction(x)+myfunction(x+deltaX));
}
}
int main( int argc, char* argv[] )
{
float end = 1.0, start = 0.0;
// deltaX
float deltaX = (end-start)/(float) N;
// error code variable
cudaError_t errorcode = cudaSuccess;
// Allocate array on host and device
float *sum_h;
sum_h = (float*)malloc(sizeof(float));
*sum_h = 0.0;
float *sum_d;
if (( errorcode = cudaMalloc((void **)&sum_d, sizeof(float)))!= cudaSuccess)
{
printf("cudaMalloc(): %s/n", cudaGetErrorString(errorcode));
exit(1);
}
// Copy values from host to device
if((errorcode = cudaMemcpy( sum_d, sum_h, sizeof(float), cudaMemcpyHostToDevice))
!=cudaSuccess)
{
printf("cudaMemcpy(): %s\n", cudaGetErrorString(errorcode));
exit(1);
}
// Do the integration
int grid_Size = N/block_Size + ( N % block_Size == 0 ? 0:1);
integratorKernel <<< grid_Size, block_Size >>> (sum_d, start, deltaX);
// Copy results from device to host
if((errorcode = cudaMemcpy( sum_h, sum_d, sizeof(float), cudaMemcpyDeviceToHost))
!=cudaSuccess)
{
printf("cudaMemcpy(): %s\n", cudaGetErrorString(errorcode));
exit(1);
}
printf("The integral is: %f\n", (*sum_h)*deltaX/2.0);
// clean up
free(sum_h);
cudaFree(sum_d);
return 0;
} | code for sm_80
Function : _Z16integratorKernelPfff
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GT.AND P0, PT, R0, 0xfff, PT ; /* 0x00000fff0000780c */
/* 0x000fda0003f04270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ I2F R0, R0 ; /* 0x0000000000007306 */
/* 0x000e220000201400 */
/*0070*/ MOV R3, c[0x0][0x16c] ; /* 0x00005b0000037a02 */
/* 0x000fe20000000f00 */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc80000000a00 */
/*0090*/ FFMA R10, R0, R3, c[0x0][0x168] ; /* 0x00005a00000a7623 */
/* 0x001fc80000000003 */
/*00a0*/ FADD R12, R10.reuse, c[0x0][0x16c] ; /* 0x00005b000a0c7621 */
/* 0x040fe20000000000 */
/*00b0*/ F2F.F64.F32 R4, R10 ; /* 0x0000000a00047310 */
/* 0x000fe20000201800 */
/*00c0*/ FMUL R11, R10, R10 ; /* 0x0000000a0a0b7220 */
/* 0x000fe40000400000 */
/*00d0*/ FMUL R13, R12, R12 ; /* 0x0000000c0c0d7220 */
/* 0x000fca0000400000 */
/*00e0*/ F2F.F64.F32 R2, R11 ; /* 0x0000000b00027310 */
/* 0x000e300000201800 */
/*00f0*/ F2F.F64.F32 R8, R12 ; /* 0x0000000c00087310 */
/* 0x000ff00000201800 */
/*0100*/ F2F.F64.F32 R6, R13 ; /* 0x0000000d00067310 */
/* 0x000e620000201800 */
/*0110*/ DFMA R2, R4, 2, R2 ; /* 0x400000000402782b */
/* 0x0010840000000002 */
/*0120*/ MOV R4, c[0x0][0x160] ; /* 0x0000580000047a02 */
/* 0x001fe40000000f00 */
/*0130*/ MOV R5, c[0x0][0x164] ; /* 0x0000590000057a02 */
/* 0x000fe40000000f00 */
/*0140*/ DADD R2, R2, 3 ; /* 0x4008000002027429 */
/* 0x004e080000000000 */
/*0150*/ DFMA R6, R8, 2, R6 ; /* 0x400000000806782b */
/* 0x002e4c0000000006 */
/*0160*/ F2F.F32.F64 R2, R2 ; /* 0x0000000200027310 */
/* 0x001fe20000301000 */
/*0170*/ DADD R6, R6, 3 ; /* 0x4008000006067429 */
/* 0x002e140000000000 */
/*0180*/ F2F.F32.F64 R7, R6 ; /* 0x0000000600077310 */
/* 0x001e240000301000 */
/*0190*/ FADD R9, R2, R7 ; /* 0x0000000702097221 */
/* 0x001fca0000000000 */
/*01a0*/ RED.E.ADD.F32.FTZ.RN.STRONG.GPU [R4.64], R9 ; /* 0x000000090400798e */
/* 0x000fe2000c10e784 */
/*01b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01c0*/ BRA 0x1c0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#define N 4096
#define block_Size 256
/* function to integrate, defined as a function on the GPU device */
__device__ float myfunction(float a)
{
return a*a+2.0*a + 3.0;
}
/* kernel function to compute the summation used in the trapezoidal rule
for numerical integration */
__global__ void integratorKernel(float *a, float start, float deltaX)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id<N)
{
float x = start + (float)id * deltaX;
atomicAdd(a, myfunction(x)+myfunction(x+deltaX));
}
}
int main( int argc, char* argv[] )
{
float end = 1.0, start = 0.0;
// deltaX
float deltaX = (end-start)/(float) N;
// error code variable
cudaError_t errorcode = cudaSuccess;
// Allocate array on host and device
float *sum_h;
sum_h = (float*)malloc(sizeof(float));
*sum_h = 0.0;
float *sum_d;
if (( errorcode = cudaMalloc((void **)&sum_d, sizeof(float)))!= cudaSuccess)
{
printf("cudaMalloc(): %s/n", cudaGetErrorString(errorcode));
exit(1);
}
// Copy values from host to device
if((errorcode = cudaMemcpy( sum_d, sum_h, sizeof(float), cudaMemcpyHostToDevice))
!=cudaSuccess)
{
printf("cudaMemcpy(): %s\n", cudaGetErrorString(errorcode));
exit(1);
}
// Do the integration
int grid_Size = N/block_Size + ( N % block_Size == 0 ? 0:1);
integratorKernel <<< grid_Size, block_Size >>> (sum_d, start, deltaX);
// Copy results from device to host
if((errorcode = cudaMemcpy( sum_h, sum_d, sizeof(float), cudaMemcpyDeviceToHost))
!=cudaSuccess)
{
printf("cudaMemcpy(): %s\n", cudaGetErrorString(errorcode));
exit(1);
}
printf("The integral is: %f\n", (*sum_h)*deltaX/2.0);
// clean up
free(sum_h);
cudaFree(sum_d);
return 0;
} | .file "tmpxft_0019a61d_00000000-6_trap2.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10myfunctionf
.type _Z10myfunctionf, @function
_Z10myfunctionf:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z10myfunctionf, .-_Z10myfunctionf
.globl _Z38__device_stub__Z16integratorKernelPfffPfff
.type _Z38__device_stub__Z16integratorKernelPfffPfff, @function
_Z38__device_stub__Z16integratorKernelPfffPfff:
.LFB2083:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movss %xmm0, 4(%rsp)
movss %xmm1, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z16integratorKernelPfff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z38__device_stub__Z16integratorKernelPfffPfff, .-_Z38__device_stub__Z16integratorKernelPfffPfff
.globl _Z16integratorKernelPfff
.type _Z16integratorKernelPfff, @function
_Z16integratorKernelPfff:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z16integratorKernelPfffPfff
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z16integratorKernelPfff, .-_Z16integratorKernelPfff
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "cudaMalloc(): %s/n"
.LC2:
.string "cudaMemcpy(): %s\n"
.LC5:
.string "The integral is: %f\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $48, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $4, %edi
call malloc@PLT
movq %rax, %rbx
movl $0x00000000, (%rax)
leaq 8(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L20
movl $1, %ecx
movl $4, %edx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L21
movl $256, 28(%rsp)
movl $1, 32(%rsp)
movl $16, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L22
.L16:
movl $2, %ecx
movl $4, %edx
movq 8(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L23
movss .LC3(%rip), %xmm0
mulss (%rbx), %xmm0
cvtss2sd %xmm0, %xmm0
mulsd .LC4(%rip), %xmm0
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq %rbx, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L24
movl $0, %eax
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L21:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L22:
movss .LC3(%rip), %xmm1
pxor %xmm0, %xmm0
movq 8(%rsp), %rdi
call _Z38__device_stub__Z16integratorKernelPfffPfff
jmp .L16
.L23:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z16integratorKernelPfff"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z16integratorKernelPfff(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC3:
.long 964689920
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC4:
.long 0
.long 1071644672
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#define N 4096
#define block_Size 256
/* function to integrate, defined as a function on the GPU device */
__device__ float myfunction(float a)
{
return a*a+2.0*a + 3.0;
}
/* kernel function to compute the summation used in the trapezoidal rule
for numerical integration */
__global__ void integratorKernel(float *a, float start, float deltaX)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id<N)
{
float x = start + (float)id * deltaX;
atomicAdd(a, myfunction(x)+myfunction(x+deltaX));
}
}
int main( int argc, char* argv[] )
{
float end = 1.0, start = 0.0;
// deltaX
float deltaX = (end-start)/(float) N;
// error code variable
cudaError_t errorcode = cudaSuccess;
// Allocate array on host and device
float *sum_h;
sum_h = (float*)malloc(sizeof(float));
*sum_h = 0.0;
float *sum_d;
if (( errorcode = cudaMalloc((void **)&sum_d, sizeof(float)))!= cudaSuccess)
{
printf("cudaMalloc(): %s/n", cudaGetErrorString(errorcode));
exit(1);
}
// Copy values from host to device
if((errorcode = cudaMemcpy( sum_d, sum_h, sizeof(float), cudaMemcpyHostToDevice))
!=cudaSuccess)
{
printf("cudaMemcpy(): %s\n", cudaGetErrorString(errorcode));
exit(1);
}
// Do the integration
int grid_Size = N/block_Size + ( N % block_Size == 0 ? 0:1);
integratorKernel <<< grid_Size, block_Size >>> (sum_d, start, deltaX);
// Copy results from device to host
if((errorcode = cudaMemcpy( sum_h, sum_d, sizeof(float), cudaMemcpyDeviceToHost))
!=cudaSuccess)
{
printf("cudaMemcpy(): %s\n", cudaGetErrorString(errorcode));
exit(1);
}
printf("The integral is: %f\n", (*sum_h)*deltaX/2.0);
// clean up
free(sum_h);
cudaFree(sum_d);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define N 4096
#define block_Size 256
/* function to integrate, defined as a function on the GPU device */
__device__ float myfunction(float a)
{
return a*a+2.0*a + 3.0;
}
/* kernel function to compute the summation used in the trapezoidal rule
for numerical integration */
__global__ void integratorKernel(float *a, float start, float deltaX)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id<N)
{
float x = start + (float)id * deltaX;
atomicAdd(a, myfunction(x)+myfunction(x+deltaX));
}
}
int main( int argc, char* argv[] )
{
float end = 1.0, start = 0.0;
// deltaX
float deltaX = (end-start)/(float) N;
// error code variable
hipError_t errorcode = hipSuccess;
// Allocate array on host and device
float *sum_h;
sum_h = (float*)malloc(sizeof(float));
*sum_h = 0.0;
float *sum_d;
if (( errorcode = hipMalloc((void **)&sum_d, sizeof(float)))!= hipSuccess)
{
printf("cudaMalloc(): %s/n", hipGetErrorString(errorcode));
exit(1);
}
// Copy values from host to device
if((errorcode = hipMemcpy( sum_d, sum_h, sizeof(float), hipMemcpyHostToDevice))
!=hipSuccess)
{
printf("cudaMemcpy(): %s\n", hipGetErrorString(errorcode));
exit(1);
}
// Do the integration
int grid_Size = N/block_Size + ( N % block_Size == 0 ? 0:1);
integratorKernel <<< grid_Size, block_Size >>> (sum_d, start, deltaX);
// Copy results from device to host
if((errorcode = hipMemcpy( sum_h, sum_d, sizeof(float), hipMemcpyDeviceToHost))
!=hipSuccess)
{
printf("cudaMemcpy(): %s\n", hipGetErrorString(errorcode));
exit(1);
}
printf("The integral is: %f\n", (*sum_h)*deltaX/2.0);
// clean up
free(sum_h);
hipFree(sum_d);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define N 4096
#define block_Size 256
/* function to integrate, defined as a function on the GPU device */
__device__ float myfunction(float a)
{
return a*a+2.0*a + 3.0;
}
/* kernel function to compute the summation used in the trapezoidal rule
for numerical integration */
__global__ void integratorKernel(float *a, float start, float deltaX)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id<N)
{
float x = start + (float)id * deltaX;
atomicAdd(a, myfunction(x)+myfunction(x+deltaX));
}
}
int main( int argc, char* argv[] )
{
float end = 1.0, start = 0.0;
// deltaX
float deltaX = (end-start)/(float) N;
// error code variable
hipError_t errorcode = hipSuccess;
// Allocate array on host and device
float *sum_h;
sum_h = (float*)malloc(sizeof(float));
*sum_h = 0.0;
float *sum_d;
if (( errorcode = hipMalloc((void **)&sum_d, sizeof(float)))!= hipSuccess)
{
printf("cudaMalloc(): %s/n", hipGetErrorString(errorcode));
exit(1);
}
// Copy values from host to device
if((errorcode = hipMemcpy( sum_d, sum_h, sizeof(float), hipMemcpyHostToDevice))
!=hipSuccess)
{
printf("cudaMemcpy(): %s\n", hipGetErrorString(errorcode));
exit(1);
}
// Do the integration
int grid_Size = N/block_Size + ( N % block_Size == 0 ? 0:1);
integratorKernel <<< grid_Size, block_Size >>> (sum_d, start, deltaX);
// Copy results from device to host
if((errorcode = hipMemcpy( sum_h, sum_d, sizeof(float), hipMemcpyDeviceToHost))
!=hipSuccess)
{
printf("cudaMemcpy(): %s\n", hipGetErrorString(errorcode));
exit(1);
}
printf("The integral is: %f\n", (*sum_h)*deltaX/2.0);
// clean up
free(sum_h);
hipFree(sum_d);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16integratorKernelPfff
.globl _Z16integratorKernelPfff
.p2align 8
.type _Z16integratorKernelPfff,@function
_Z16integratorKernelPfff:
s_load_b32 s2, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e32 0x1000, v1
s_cbranch_execz .LBB0_6
s_load_b64 s[4:5], s[0:1], 0x8
v_cvt_f32_i32_e32 v0, v1
s_mov_b32 s2, exec_lo
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v0, v0, s5, s4
v_add_f32_e32 v4, s5, v0
v_mul_f32_e32 v2, v0, v0
v_cvt_f64_f32_e32 v[0:1], v0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_mul_f32_e32 v6, v4, v4
v_cvt_f64_f32_e32 v[2:3], v2
v_cvt_f64_f32_e32 v[4:5], v4
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cvt_f64_f32_e32 v[6:7], v6
v_fma_f64 v[0:1], v[0:1], 2.0, v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f64 v[2:3], v[4:5], 2.0, v[6:7]
v_add_f64 v[0:1], v[0:1], 0x40080000
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[2:3], v[2:3], 0x40080000
v_cvt_f32_f64_e32 v0, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cvt_f32_f64_e32 v1, v[2:3]
v_bfrev_b32_e32 v2, 1
v_add_f32_e32 v0, v0, v1
.LBB0_2:
s_ctz_i32_b32 s3, s2
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_readlane_b32 s4, v0, s3
s_lshl_b32 s3, 1, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_not1_b32 s2, s2, s3
s_cmp_lg_u32 s2, 0
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v2, s4, v2
s_cbranch_scc1 .LBB0_2
v_mbcnt_lo_u32_b32 v0, exec_lo, 0
s_mov_b32 s2, 0
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v0
s_xor_b32 s3, exec_lo, s3
s_cbranch_execz .LBB0_6
s_load_b64 s[0:1], s[0:1], 0x0
v_mov_b32_e32 v3, 0
s_waitcnt lgkmcnt(0)
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_mov_b32_e32 v1, s3
.LBB0_5:
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v0, v1, v2
global_atomic_cmpswap_b32 v0, v3, v[0:1], s[0:1] glc
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v0, v1
v_mov_b32_e32 v1, v0
s_or_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execnz .LBB0_5
.LBB0_6:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16integratorKernelPfff
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16integratorKernelPfff, .Lfunc_end0-_Z16integratorKernelPfff
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16integratorKernelPfff
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16integratorKernelPfff.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define N 4096
#define block_Size 256
/* function to integrate, defined as a function on the GPU device */
__device__ float myfunction(float a)
{
return a*a+2.0*a + 3.0;
}
/* kernel function to compute the summation used in the trapezoidal rule
for numerical integration */
__global__ void integratorKernel(float *a, float start, float deltaX)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id<N)
{
float x = start + (float)id * deltaX;
atomicAdd(a, myfunction(x)+myfunction(x+deltaX));
}
}
/* Numerically integrates f over [0,1] with N = 4096 trapezoids on the GPU,
   then prints the result. Exits with status 1 on any HIP API failure. */
int main( int argc, char* argv[] )
{
float end = 1.0, start = 0.0;
// deltaX: width of one subinterval
float deltaX = (end-start)/(float) N;
// error code variable
hipError_t errorcode = hipSuccess;
// Allocate the single-float accumulator on host and device
float *sum_h;
sum_h = (float*)malloc(sizeof(float));
*sum_h = 0.0;
float *sum_d;
if (( errorcode = hipMalloc((void **)&sum_d, sizeof(float)))!= hipSuccess)
{
// NOTE(review): "/n" below looks like a typo for "\n" (kept as-is)
printf("cudaMalloc(): %s/n", hipGetErrorString(errorcode));
exit(1);
}
// Copy the zeroed accumulator from host to device
if((errorcode = hipMemcpy( sum_d, sum_h, sizeof(float), hipMemcpyHostToDevice))
!=hipSuccess)
{
printf("cudaMemcpy(): %s\n", hipGetErrorString(errorcode));
exit(1);
}
// Launch enough blocks of block_Size threads to cover all N sample points
// (ceil-div; the kernel's id < N guard handles the rounded-up tail)
int grid_Size = N/block_Size + ( N % block_Size == 0 ? 0:1);
integratorKernel <<< grid_Size, block_Size >>> (sum_d, start, deltaX);
// Copy results from device to host; this blocking copy on the null stream
// is ordered after the kernel, so no explicit synchronize is needed.
// NOTE(review): launch errors themselves are never checked here.
if((errorcode = hipMemcpy( sum_h, sum_d, sizeof(float), hipMemcpyDeviceToHost))
!=hipSuccess)
{
printf("cudaMemcpy(): %s\n", hipGetErrorString(errorcode));
exit(1);
}
// Trapezoidal rule: (sum of f(x_i) + f(x_i + dx)) * dx / 2
printf("The integral is: %f\n", (*sum_h)*deltaX/2.0);
// clean up
free(sum_h);
hipFree(sum_d);
return 0;
}
.file "trap2.hip"
.globl _Z31__device_stub__integratorKernelPfff # -- Begin function _Z31__device_stub__integratorKernelPfff
.p2align 4, 0x90
.type _Z31__device_stub__integratorKernelPfff,@function
_Z31__device_stub__integratorKernelPfff: # @_Z31__device_stub__integratorKernelPfff
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movss %xmm0, 4(%rsp)
movss %xmm1, (%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
movq %rsp, %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z16integratorKernelPfff, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z31__device_stub__integratorKernelPfff, .Lfunc_end0-_Z31__device_stub__integratorKernelPfff
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x39800000 # float 2.44140625E-4
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI1_1:
.quad 0x3fe0000000000000 # double 0.5
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $112, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -16
movl $4, %edi
callq malloc
movq %rax, %rbx
movl $0, (%rax)
leaq 8(%rsp), %rdi
movl $4, %esi
callq hipMalloc
testl %eax, %eax
jne .LBB1_1
# %bb.3:
movq 8(%rsp), %rdi
movl $4, %edx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_4
# %bb.5:
movabsq $4294967312, %rdi # imm = 0x100000010
leaq 240(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_7
# %bb.6:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
movl $0, 20(%rsp)
movl $964689920, 16(%rsp) # imm = 0x39800000
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16integratorKernelPfff, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_7:
movq 8(%rsp), %rsi
movl $4, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_4
# %bb.8:
movss (%rbx), %xmm0 # xmm0 = mem[0],zero,zero,zero
mulss .LCPI1_0(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
mulsd .LCPI1_1(%rip), %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
movq %rbx, %rdi
callq free
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $112, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.LBB1_4:
.cfi_def_cfa_offset 128
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %edi
jmp .LBB1_2
.LBB1_1:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
.LBB1_2:
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16integratorKernelPfff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16integratorKernelPfff,@object # @_Z16integratorKernelPfff
.section .rodata,"a",@progbits
.globl _Z16integratorKernelPfff
.p2align 3, 0x0
_Z16integratorKernelPfff:
.quad _Z31__device_stub__integratorKernelPfff
.size _Z16integratorKernelPfff, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "cudaMalloc(): %s/n"
.size .L.str, 19
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "cudaMemcpy(): %s\n"
.size .L.str.1, 18
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "The integral is: %f\n"
.size .L.str.2, 21
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z16integratorKernelPfff"
.size .L__unnamed_1, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__integratorKernelPfff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16integratorKernelPfff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z16integratorKernelPfff
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GT.AND P0, PT, R0, 0xfff, PT ; /* 0x00000fff0000780c */
/* 0x000fda0003f04270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ I2F R0, R0 ; /* 0x0000000000007306 */
/* 0x000e220000201400 */
/*0070*/ MOV R3, c[0x0][0x16c] ; /* 0x00005b0000037a02 */
/* 0x000fe20000000f00 */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc80000000a00 */
/*0090*/ FFMA R10, R0, R3, c[0x0][0x168] ; /* 0x00005a00000a7623 */
/* 0x001fc80000000003 */
/*00a0*/ FADD R12, R10.reuse, c[0x0][0x16c] ; /* 0x00005b000a0c7621 */
/* 0x040fe20000000000 */
/*00b0*/ F2F.F64.F32 R4, R10 ; /* 0x0000000a00047310 */
/* 0x000fe20000201800 */
/*00c0*/ FMUL R11, R10, R10 ; /* 0x0000000a0a0b7220 */
/* 0x000fe40000400000 */
/*00d0*/ FMUL R13, R12, R12 ; /* 0x0000000c0c0d7220 */
/* 0x000fca0000400000 */
/*00e0*/ F2F.F64.F32 R2, R11 ; /* 0x0000000b00027310 */
/* 0x000e300000201800 */
/*00f0*/ F2F.F64.F32 R8, R12 ; /* 0x0000000c00087310 */
/* 0x000ff00000201800 */
/*0100*/ F2F.F64.F32 R6, R13 ; /* 0x0000000d00067310 */
/* 0x000e620000201800 */
/*0110*/ DFMA R2, R4, 2, R2 ; /* 0x400000000402782b */
/* 0x0010840000000002 */
/*0120*/ MOV R4, c[0x0][0x160] ; /* 0x0000580000047a02 */
/* 0x001fe40000000f00 */
/*0130*/ MOV R5, c[0x0][0x164] ; /* 0x0000590000057a02 */
/* 0x000fe40000000f00 */
/*0140*/ DADD R2, R2, 3 ; /* 0x4008000002027429 */
/* 0x004e080000000000 */
/*0150*/ DFMA R6, R8, 2, R6 ; /* 0x400000000806782b */
/* 0x002e4c0000000006 */
/*0160*/ F2F.F32.F64 R2, R2 ; /* 0x0000000200027310 */
/* 0x001fe20000301000 */
/*0170*/ DADD R6, R6, 3 ; /* 0x4008000006067429 */
/* 0x002e140000000000 */
/*0180*/ F2F.F32.F64 R7, R6 ; /* 0x0000000600077310 */
/* 0x001e240000301000 */
/*0190*/ FADD R9, R2, R7 ; /* 0x0000000702097221 */
/* 0x001fca0000000000 */
/*01a0*/ RED.E.ADD.F32.FTZ.RN.STRONG.GPU [R4.64], R9 ; /* 0x000000090400798e */
/* 0x000fe2000c10e784 */
/*01b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01c0*/ BRA 0x1c0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16integratorKernelPfff
.globl _Z16integratorKernelPfff
.p2align 8
.type _Z16integratorKernelPfff,@function
_Z16integratorKernelPfff:
s_load_b32 s2, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e32 0x1000, v1
s_cbranch_execz .LBB0_6
s_load_b64 s[4:5], s[0:1], 0x8
v_cvt_f32_i32_e32 v0, v1
s_mov_b32 s2, exec_lo
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v0, v0, s5, s4
v_add_f32_e32 v4, s5, v0
v_mul_f32_e32 v2, v0, v0
v_cvt_f64_f32_e32 v[0:1], v0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_mul_f32_e32 v6, v4, v4
v_cvt_f64_f32_e32 v[2:3], v2
v_cvt_f64_f32_e32 v[4:5], v4
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cvt_f64_f32_e32 v[6:7], v6
v_fma_f64 v[0:1], v[0:1], 2.0, v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f64 v[2:3], v[4:5], 2.0, v[6:7]
v_add_f64 v[0:1], v[0:1], 0x40080000
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[2:3], v[2:3], 0x40080000
v_cvt_f32_f64_e32 v0, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cvt_f32_f64_e32 v1, v[2:3]
v_bfrev_b32_e32 v2, 1
v_add_f32_e32 v0, v0, v1
.LBB0_2:
s_ctz_i32_b32 s3, s2
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_readlane_b32 s4, v0, s3
s_lshl_b32 s3, 1, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_not1_b32 s2, s2, s3
s_cmp_lg_u32 s2, 0
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v2, s4, v2
s_cbranch_scc1 .LBB0_2
v_mbcnt_lo_u32_b32 v0, exec_lo, 0
s_mov_b32 s2, 0
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v0
s_xor_b32 s3, exec_lo, s3
s_cbranch_execz .LBB0_6
s_load_b64 s[0:1], s[0:1], 0x0
v_mov_b32_e32 v3, 0
s_waitcnt lgkmcnt(0)
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_mov_b32_e32 v1, s3
.LBB0_5:
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v0, v1, v2
global_atomic_cmpswap_b32 v0, v3, v[0:1], s[0:1] glc
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v0, v1
v_mov_b32_e32 v1, v0
s_or_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execnz .LBB0_5
.LBB0_6:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16integratorKernelPfff
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16integratorKernelPfff, .Lfunc_end0-_Z16integratorKernelPfff
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16integratorKernelPfff
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16integratorKernelPfff.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0019a61d_00000000-6_trap2.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10myfunctionf
.type _Z10myfunctionf, @function
_Z10myfunctionf:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z10myfunctionf, .-_Z10myfunctionf
.globl _Z38__device_stub__Z16integratorKernelPfffPfff
.type _Z38__device_stub__Z16integratorKernelPfffPfff, @function
_Z38__device_stub__Z16integratorKernelPfffPfff:
.LFB2083:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movss %xmm0, 4(%rsp)
movss %xmm1, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z16integratorKernelPfff(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z38__device_stub__Z16integratorKernelPfffPfff, .-_Z38__device_stub__Z16integratorKernelPfffPfff
.globl _Z16integratorKernelPfff
.type _Z16integratorKernelPfff, @function
_Z16integratorKernelPfff:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z16integratorKernelPfffPfff
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z16integratorKernelPfff, .-_Z16integratorKernelPfff
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "cudaMalloc(): %s/n"
.LC2:
.string "cudaMemcpy(): %s\n"
.LC5:
.string "The integral is: %f\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $48, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $4, %edi
call malloc@PLT
movq %rax, %rbx
movl $0x00000000, (%rax)
leaq 8(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L20
movl $1, %ecx
movl $4, %edx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L21
movl $256, 28(%rsp)
movl $1, 32(%rsp)
movl $16, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L22
.L16:
movl $2, %ecx
movl $4, %edx
movq 8(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L23
movss .LC3(%rip), %xmm0
mulss (%rbx), %xmm0
cvtss2sd %xmm0, %xmm0
mulsd .LC4(%rip), %xmm0
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq %rbx, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L24
movl $0, %eax
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L21:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L22:
movss .LC3(%rip), %xmm1
pxor %xmm0, %xmm0
movq 8(%rsp), %rdi
call _Z38__device_stub__Z16integratorKernelPfffPfff
jmp .L16
.L23:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z16integratorKernelPfff"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z16integratorKernelPfff(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC3:
.long 964689920
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC4:
.long 0
.long 1071644672
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "trap2.hip"
.globl _Z31__device_stub__integratorKernelPfff # -- Begin function _Z31__device_stub__integratorKernelPfff
.p2align 4, 0x90
.type _Z31__device_stub__integratorKernelPfff,@function
_Z31__device_stub__integratorKernelPfff: # @_Z31__device_stub__integratorKernelPfff
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movss %xmm0, 4(%rsp)
movss %xmm1, (%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
movq %rsp, %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z16integratorKernelPfff, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z31__device_stub__integratorKernelPfff, .Lfunc_end0-_Z31__device_stub__integratorKernelPfff
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x39800000 # float 2.44140625E-4
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI1_1:
.quad 0x3fe0000000000000 # double 0.5
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $112, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -16
movl $4, %edi
callq malloc
movq %rax, %rbx
movl $0, (%rax)
leaq 8(%rsp), %rdi
movl $4, %esi
callq hipMalloc
testl %eax, %eax
jne .LBB1_1
# %bb.3:
movq 8(%rsp), %rdi
movl $4, %edx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_4
# %bb.5:
movabsq $4294967312, %rdi # imm = 0x100000010
leaq 240(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_7
# %bb.6:
movq 8(%rsp), %rax
movq %rax, 72(%rsp)
movl $0, 20(%rsp)
movl $964689920, 16(%rsp) # imm = 0x39800000
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16integratorKernelPfff, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_7:
movq 8(%rsp), %rsi
movl $4, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_4
# %bb.8:
movss (%rbx), %xmm0 # xmm0 = mem[0],zero,zero,zero
mulss .LCPI1_0(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
mulsd .LCPI1_1(%rip), %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
movq %rbx, %rdi
callq free
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $112, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.LBB1_4:
.cfi_def_cfa_offset 128
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.1, %edi
jmp .LBB1_2
.LBB1_1:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
.LBB1_2:
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16integratorKernelPfff, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16integratorKernelPfff,@object # @_Z16integratorKernelPfff
.section .rodata,"a",@progbits
.globl _Z16integratorKernelPfff
.p2align 3, 0x0
_Z16integratorKernelPfff:
.quad _Z31__device_stub__integratorKernelPfff
.size _Z16integratorKernelPfff, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "cudaMalloc(): %s/n"
.size .L.str, 19
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "cudaMemcpy(): %s\n"
.size .L.str.1, 18
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "The integral is: %f\n"
.size .L.str.2, 21
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z16integratorKernelPfff"
.size .L__unnamed_1, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__integratorKernelPfff
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16integratorKernelPfff
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
/*
Vector addition with a single thread for each addition
*/
/*
Vector addition with thread mapping and thread accessing its neighbor parallely
*/
//slower than simpler
/*
Matrix Matrix multiplication with a single thread for each row
*/
/*
Matrix Matrix multiplication with a single thread for each result element
*/
/*
Matrix Vector multiplication with a block with 4 threads per block, shared block mem and parallel reduce
*/
/*
Vector addition c = a + b where each thread owns one contiguous chunk.
Partitioning: step = len / (total thread count, integer division); thread t
covers indices [t*step, (t+1)*step), and the last thread extends its range
to len so the remainder elements are not dropped.
NOTE(review): adjacent threads access addresses `step` elements apart, so
global loads/stores are not coalesced; a grid-stride loop would interleave
indices across threads and coalesce instead — presumably the "slower than
simpler" remark above refers to this.
*/
__global__ void good_addition(int *a, int *b, int *c, int len)
{
// flat global thread id
int tid= threadIdx.x + blockIdx.x * blockDim.x;
const int thread_count= blockDim.x*gridDim.x;
int step = len/thread_count;   // chunk size (truncating division)
int start_index = tid*step;
int end_index= (tid+1)* step;
if (tid==thread_count-1) end_index=len;   // last thread absorbs the tail
//printf("Step is %d\n",step);
while(start_index< end_index)
{
c[start_index]=a[start_index]+b[start_index];
//printf("I am block: %d with tid: %d Result %d \n",blockIdx.x,tid,c[tid]);
start_index +=1;
}
}
Function : _Z13good_additionPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff047624 */
/* 0x000fe200078e00ff */
/*0020*/ IABS R8, c[0x0][0x178] ; /* 0x00005e0000087a13 */
/* 0x000fc60000000000 */
/*0030*/ IMAD R4, R4, c[0x0][0xc], RZ ; /* 0x0000030004047a24 */
/* 0x000fca00078e02ff */
/*0040*/ IABS R5, R4.reuse ; /* 0x0000000400057213 */
/* 0x080fe40000000000 */
/*0050*/ IABS R9, R4 ; /* 0x0000000400097213 */
/* 0x000fe40000000000 */
/*0060*/ I2F.RP R0, R5 ; /* 0x0000000500007306 */
/* 0x000e300000209400 */
/*0070*/ MUFU.RCP R0, R0 ; /* 0x0000000000007308 */
/* 0x001e240000001000 */
/*0080*/ IADD3 R2, R0, 0xffffffe, RZ ; /* 0x0ffffffe00027810 */
/* 0x001fe20007ffe0ff */
/*0090*/ IMAD.MOV R0, RZ, RZ, -R9 ; /* 0x000000ffff007224 */
/* 0x000fca00078e0a09 */
/*00a0*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*00b0*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x001fe400078e00ff */
/*00c0*/ IMAD.MOV R6, RZ, RZ, -R3 ; /* 0x000000ffff067224 */
/* 0x002fc800078e0a03 */
/*00d0*/ IMAD R7, R6, R5, RZ ; /* 0x0000000506077224 */
/* 0x000fe200078e02ff */
/*00e0*/ MOV R6, R8 ; /* 0x0000000800067202 */
/* 0x000fc60000000f00 */
/*00f0*/ IMAD.HI.U32 R3, R3, R7, R2 ; /* 0x0000000703037227 */
/* 0x000fe200078e0002 */
/*0100*/ LOP3.LUT R2, R4, c[0x0][0x178], RZ, 0x3c, !PT ; /* 0x00005e0004027a12 */
/* 0x000fc800078e3cff */
/*0110*/ ISETP.GE.AND P2, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe20003f46270 */
/*0120*/ IMAD.HI.U32 R3, R3, R6, RZ ; /* 0x0000000603037227 */
/* 0x000fc800078e00ff */
/*0130*/ IMAD R0, R3, R0, R6 ; /* 0x0000000003007224 */
/* 0x000fca00078e0206 */
/*0140*/ ISETP.GT.U32.AND P1, PT, R5, R0, PT ; /* 0x000000000500720c */
/* 0x000fda0003f24070 */
/*0150*/ @!P1 IMAD.IADD R0, R0, 0x1, -R5 ; /* 0x0000000100009824 */
/* 0x000fe200078e0a05 */
/*0160*/ @!P1 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103039810 */
/* 0x000fe40007ffe0ff */
/*0170*/ ISETP.NE.AND P1, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe40003f25270 */
/*0180*/ ISETP.GE.U32.AND P0, PT, R0, R5, PT ; /* 0x000000050000720c */
/* 0x000fe40003f06070 */
/*0190*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*01a0*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e2e0000002100 */
/*01b0*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */
/* 0x000fc80007ffe0ff */
/*01c0*/ @!P2 IADD3 R3, -R3, RZ, RZ ; /* 0x000000ff0303a210 */
/* 0x000fe40007ffe1ff */
/*01d0*/ @!P1 LOP3.LUT R3, RZ, R4, RZ, 0x33, !PT ; /* 0x00000004ff039212 */
/* 0x000fe200078e33ff */
/*01e0*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x001fe200078e0205 */
/*01f0*/ IADD3 R5, R4, -0x1, RZ ; /* 0xffffffff04057810 */
/* 0x000fc60007ffe0ff */
/*0200*/ IMAD R9, R0.reuse, R3, RZ ; /* 0x0000000300097224 */
/* 0x040fe200078e02ff */
/*0210*/ ISETP.NE.AND P0, PT, R0, R5, PT ; /* 0x000000050000720c */
/* 0x000fc60003f05270 */
/*0220*/ IMAD.IADD R0, R3, 0x1, R9 ; /* 0x0000000103007824 */
/* 0x000fca00078e0209 */
/*0230*/ SEL R0, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a07 */
/* 0x000fc80000000000 */
/*0240*/ ISETP.GT.AND P0, PT, R0, R9, PT ; /* 0x000000090000720c */
/* 0x000fda0003f04270 */
/*0250*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0260*/ IMAD.IADD R2, R0, 0x1, -R9 ; /* 0x0000000100027824 */
/* 0x000fe200078e0a09 */
/*0270*/ LOP3.LUT R3, RZ, R9, RZ, 0x33, !PT ; /* 0x00000009ff037212 */
/* 0x000fe200078e33ff */
/*0280*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0290*/ BSSY B0, 0x4c0 ; /* 0x0000022000007945 */
/* 0x000fe40003800000 */
/*02a0*/ LOP3.LUT P1, R8, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302087812 */
/* 0x000fe2000782c0ff */
/*02b0*/ IMAD.IADD R3, R0, 0x1, R3 ; /* 0x0000000100037824 */
/* 0x000fca00078e0203 */
/*02c0*/ ISETP.GE.U32.AND P0, PT, R3, 0x3, PT ; /* 0x000000030300780c */
/* 0x000fce0003f06070 */
/*02d0*/ @!P1 BRA 0x4b0 ; /* 0x000001d000009947 */
/* 0x000fea0003800000 */
/*02e0*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */
/* 0x000fd400000001ff */
/*02f0*/ IMAD.WIDE R2, R9, R6, c[0x0][0x170] ; /* 0x00005c0009027625 */
/* 0x000fc800078e0206 */
/*0300*/ IMAD.WIDE R4, R9, R6, c[0x0][0x168] ; /* 0x00005a0009047625 */
/* 0x000fc800078e0206 */
/*0310*/ IMAD.WIDE R6, R9, R6, c[0x0][0x160] ; /* 0x0000580009067625 */
/* 0x000fe200078e0206 */
/*0320*/ MOV R13, R5 ; /* 0x00000005000d7202 */
/* 0x000fc60000000f00 */
/*0330*/ IMAD.MOV.U32 R12, RZ, RZ, R2 ; /* 0x000000ffff0c7224 */
/* 0x000fe400078e0002 */
/*0340*/ IMAD.MOV.U32 R15, RZ, RZ, R3 ; /* 0x000000ffff0f7224 */
/* 0x000fe400078e0003 */
/*0350*/ IMAD.MOV.U32 R10, RZ, RZ, R4 ; /* 0x000000ffff0a7224 */
/* 0x000fe400078e0004 */
/*0360*/ IMAD.MOV.U32 R11, RZ, RZ, R7 ; /* 0x000000ffff0b7224 */
/* 0x000fe400078e0007 */
/*0370*/ IMAD.MOV.U32 R2, RZ, RZ, R10 ; /* 0x000000ffff027224 */
/* 0x001fe200078e000a */
/*0380*/ MOV R4, R6 ; /* 0x0000000600047202 */
/* 0x000fe20000000f00 */
/*0390*/ IMAD.MOV.U32 R5, RZ, RZ, R11 ; /* 0x000000ffff057224 */
/* 0x000fe400078e000b */
/*03a0*/ IMAD.MOV.U32 R3, RZ, RZ, R13 ; /* 0x000000ffff037224 */
/* 0x000fc800078e000d */
/*03b0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea8000c1e1900 */
/*03c0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x0000a2000c1e1900 */
/*03d0*/ IADD3 R8, R8, -0x1, RZ ; /* 0xffffffff08087810 */
/* 0x000fe40007ffe0ff */
/*03e0*/ MOV R3, R15 ; /* 0x0000000f00037202 */
/* 0x001fe40000000f00 */
/*03f0*/ ISETP.NE.AND P1, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f25270 */
/*0400*/ IADD3 R10, P3, R10, 0x4, RZ ; /* 0x000000040a0a7810 */
/* 0x000fc40007f7e0ff */
/*0410*/ IADD3 R6, P4, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x000fe40007f9e0ff */
/*0420*/ IADD3 R9, R9, 0x1, RZ ; /* 0x0000000109097810 */
/* 0x000fe20007ffe0ff */
/*0430*/ IMAD.X R13, RZ, RZ, R13, P3 ; /* 0x000000ffff0d7224 */
/* 0x000fe400018e060d */
/*0440*/ IMAD.X R11, RZ, RZ, R11, P4 ; /* 0x000000ffff0b7224 */
/* 0x000fe400020e060b */
/*0450*/ IMAD.IADD R7, R2, 0x1, R5 ; /* 0x0000000102077824 */
/* 0x004fe400078e0205 */
/*0460*/ IMAD.MOV.U32 R2, RZ, RZ, R12 ; /* 0x000000ffff027224 */
/* 0x000fca00078e000c */
/*0470*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001e2000c101904 */
/*0480*/ IADD3 R12, P2, R12, 0x4, RZ ; /* 0x000000040c0c7810 */
/* 0x000fca0007f5e0ff */
/*0490*/ IMAD.X R15, RZ, RZ, R15, P2 ; /* 0x000000ffff0f7224 */
/* 0x000fe200010e060f */
/*04a0*/ @P1 BRA 0x370 ; /* 0xfffffec000001947 */
/* 0x000fea000383ffff */
/*04b0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*04c0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*04d0*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */
/* 0x000fe200000001ff */
/*04e0*/ IMAD.IADD R3, R0, 0x1, -R9 ; /* 0x0000000100037824 */
/* 0x001fe200078e0a09 */
/*04f0*/ BSSY B0, 0xac0 ; /* 0x000005c000007945 */
/* 0x000fe80003800000 */
/*0500*/ ISETP.GT.AND P1, PT, R3, 0xc, PT ; /* 0x0000000c0300780c */
/* 0x000fc80003f24270 */
/*0510*/ IMAD.WIDE R6, R9, R6, c[0x2][0x0] ; /* 0x0080000009067625 */
/* 0x000fca00078e0206 */
/*0520*/ IADD3 R2, P0, R6.reuse, c[0x0][0x160], RZ ; /* 0x0000580006027a10 */
/* 0x040fe40007f1e0ff */
/*0530*/ IADD3 R4, P2, R6.reuse, c[0x0][0x168], RZ ; /* 0x00005a0006047a10 */
/* 0x040fe40007f5e0ff */
/*0540*/ IADD3 R6, P3, R6, c[0x0][0x170], RZ ; /* 0x00005c0006067a10 */
/* 0x000fe40007f7e0ff */
/*0550*/ IADD3.X R3, R7.reuse, c[0x0][0x164], RZ, P0, !PT ; /* 0x0000590007037a10 */
/* 0x040fe400007fe4ff */
/*0560*/ IADD3.X R5, R7.reuse, c[0x0][0x16c], RZ, P2, !PT ; /* 0x00005b0007057a10 */
/* 0x040fe400017fe4ff */
/*0570*/ IADD3.X R7, R7, c[0x0][0x174], RZ, P3, !PT ; /* 0x00005d0007077a10 */
/* 0x000fc40001ffe4ff */
/*0580*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fe20003f0f070 */
/*0590*/ @!P1 BRA 0xab0 ; /* 0x0000051000009947 */
/* 0x000fee0003800000 */
/*05a0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*05b0*/ IADD3 R8, R0, -0xc, RZ ; /* 0xfffffff400087810 */
/* 0x000fc60007ffe0ff */
/*05c0*/ LDG.E R10, [R4.64+-0x8] ; /* 0xfffff804040a7981 */
/* 0x000ea8000c1e1900 */
/*05d0*/ LDG.E R11, [R2.64+-0x8] ; /* 0xfffff804020b7981 */
/* 0x000ea4000c1e1900 */
/*05e0*/ IMAD.IADD R11, R10, 0x1, R11 ; /* 0x000000010a0b7824 */
/* 0x004fca00078e020b */
/*05f0*/ STG.E [R6.64+-0x8], R11 ; /* 0xfffff80b06007986 */
/* 0x0001e8000c101904 */
/*0600*/ LDG.E R10, [R4.64+-0x4] ; /* 0xfffffc04040a7981 */
/* 0x000ea8000c1e1900 */
/*0610*/ LDG.E R13, [R2.64+-0x4] ; /* 0xfffffc04020d7981 */
/* 0x000ea4000c1e1900 */
/*0620*/ IMAD.IADD R13, R10, 0x1, R13 ; /* 0x000000010a0d7824 */
/* 0x004fca00078e020d */
/*0630*/ STG.E [R6.64+-0x4], R13 ; /* 0xfffffc0d06007986 */
/* 0x0003e8000c101904 */
/*0640*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */
/* 0x000ea8000c1e1900 */
/*0650*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */
/* 0x000ea4000c1e1900 */
/*0660*/ IADD3 R15, R10, R15, RZ ; /* 0x0000000f0a0f7210 */
/* 0x004fca0007ffe0ff */
/*0670*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0005e8000c101904 */
/*0680*/ LDG.E R10, [R4.64+0x4] ; /* 0x00000404040a7981 */
/* 0x000ee8000c1e1900 */
/*0690*/ LDG.E R17, [R2.64+0x4] ; /* 0x0000040402117981 */
/* 0x000ee4000c1e1900 */
/*06a0*/ IMAD.IADD R17, R10, 0x1, R17 ; /* 0x000000010a117824 */
/* 0x008fca00078e0211 */
/*06b0*/ STG.E [R6.64+0x4], R17 ; /* 0x0000041106007986 */
/* 0x0007e8000c101904 */
/*06c0*/ LDG.E R10, [R4.64+0x8] ; /* 0x00000804040a7981 */
/* 0x000f28000c1e1900 */
/*06d0*/ LDG.E R11, [R2.64+0x8] ; /* 0x00000804020b7981 */
/* 0x001f24000c1e1900 */
/*06e0*/ IMAD.IADD R11, R10, 0x1, R11 ; /* 0x000000010a0b7824 */
/* 0x010fca00078e020b */
/*06f0*/ STG.E [R6.64+0x8], R11 ; /* 0x0000080b06007986 */
/* 0x0001e8000c101904 */
/*0700*/ LDG.E R10, [R4.64+0xc] ; /* 0x00000c04040a7981 */
/* 0x000f28000c1e1900 */
/*0710*/ LDG.E R13, [R2.64+0xc] ; /* 0x00000c04020d7981 */
/* 0x002f24000c1e1900 */
/*0720*/ IMAD.IADD R13, R10, 0x1, R13 ; /* 0x000000010a0d7824 */
/* 0x010fca00078e020d */
/*0730*/ STG.E [R6.64+0xc], R13 ; /* 0x00000c0d06007986 */
/* 0x0003e8000c101904 */
/*0740*/ LDG.E R10, [R4.64+0x10] ; /* 0x00001004040a7981 */
/* 0x000f28000c1e1900 */
/*0750*/ LDG.E R15, [R2.64+0x10] ; /* 0x00001004020f7981 */
/* 0x004f24000c1e1900 */
/*0760*/ IADD3 R15, R10, R15, RZ ; /* 0x0000000f0a0f7210 */
/* 0x010fca0007ffe0ff */
/*0770*/ STG.E [R6.64+0x10], R15 ; /* 0x0000100f06007986 */
/* 0x0005e8000c101904 */
/*0780*/ LDG.E R10, [R4.64+0x14] ; /* 0x00001404040a7981 */
/* 0x000f28000c1e1900 */
/*0790*/ LDG.E R17, [R2.64+0x14] ; /* 0x0000140402117981 */
/* 0x008f24000c1e1900 */
/*07a0*/ IMAD.IADD R17, R10, 0x1, R17 ; /* 0x000000010a117824 */
/* 0x010fca00078e0211 */
/*07b0*/ STG.E [R6.64+0x14], R17 ; /* 0x0000141106007986 */
/* 0x0007e8000c101904 */
/*07c0*/ LDG.E R10, [R4.64+0x18] ; /* 0x00001804040a7981 */
/* 0x000f28000c1e1900 */
/*07d0*/ LDG.E R11, [R2.64+0x18] ; /* 0x00001804020b7981 */
/* 0x001f24000c1e1900 */
/*07e0*/ IMAD.IADD R11, R10, 0x1, R11 ; /* 0x000000010a0b7824 */
/* 0x010fca00078e020b */
/*07f0*/ STG.E [R6.64+0x18], R11 ; /* 0x0000180b06007986 */
/* 0x0001e8000c101904 */
/*0800*/ LDG.E R10, [R4.64+0x1c] ; /* 0x00001c04040a7981 */
/* 0x000f28000c1e1900 */
/*0810*/ LDG.E R13, [R2.64+0x1c] ; /* 0x00001c04020d7981 */
/* 0x002f24000c1e1900 */
/*0820*/ IMAD.IADD R13, R10, 0x1, R13 ; /* 0x000000010a0d7824 */
/* 0x010fca00078e020d */
/*0830*/ STG.E [R6.64+0x1c], R13 ; /* 0x00001c0d06007986 */
/* 0x0003e8000c101904 */
/*0840*/ LDG.E R10, [R4.64+0x20] ; /* 0x00002004040a7981 */
/* 0x000f28000c1e1900 */
/*0850*/ LDG.E R15, [R2.64+0x20] ; /* 0x00002004020f7981 */
/* 0x004f24000c1e1900 */
/*0860*/ IADD3 R15, R10, R15, RZ ; /* 0x0000000f0a0f7210 */
/* 0x010fca0007ffe0ff */
/*0870*/ STG.E [R6.64+0x20], R15 ; /* 0x0000200f06007986 */
/* 0x0005e8000c101904 */
/*0880*/ LDG.E R10, [R4.64+0x24] ; /* 0x00002404040a7981 */
/* 0x000f28000c1e1900 */
/*0890*/ LDG.E R17, [R2.64+0x24] ; /* 0x0000240402117981 */
/* 0x008f24000c1e1900 */
/*08a0*/ IMAD.IADD R17, R10, 0x1, R17 ; /* 0x000000010a117824 */
/* 0x010fca00078e0211 */
/*08b0*/ STG.E [R6.64+0x24], R17 ; /* 0x0000241106007986 */
/* 0x0007e8000c101904 */
/*08c0*/ LDG.E R10, [R4.64+0x28] ; /* 0x00002804040a7981 */
/* 0x000f28000c1e1900 */
/*08d0*/ LDG.E R11, [R2.64+0x28] ; /* 0x00002804020b7981 */
/* 0x001f24000c1e1900 */
/*08e0*/ IMAD.IADD R11, R10, 0x1, R11 ; /* 0x000000010a0b7824 */
/* 0x010fca00078e020b */
/*08f0*/ STG.E [R6.64+0x28], R11 ; /* 0x0000280b06007986 */
/* 0x000fe8000c101904 */
/*0900*/ LDG.E R10, [R4.64+0x2c] ; /* 0x00002c04040a7981 */
/* 0x000f28000c1e1900 */
/*0910*/ LDG.E R13, [R2.64+0x2c] ; /* 0x00002c04020d7981 */
/* 0x002f24000c1e1900 */
/*0920*/ IMAD.IADD R13, R10, 0x1, R13 ; /* 0x000000010a0d7824 */
/* 0x010fca00078e020d */
/*0930*/ STG.E [R6.64+0x2c], R13 ; /* 0x00002c0d06007986 */
/* 0x0001e8000c101904 */
/*0940*/ LDG.E R10, [R4.64+0x30] ; /* 0x00003004040a7981 */
/* 0x000f28000c1e1900 */
/*0950*/ LDG.E R15, [R2.64+0x30] ; /* 0x00003004020f7981 */
/* 0x004f22000c1e1900 */
/*0960*/ IADD3 R9, R9, 0x10, RZ ; /* 0x0000001009097810 */
/* 0x000fe40007ffe0ff */
/*0970*/ IADD3 R15, R10, R15, RZ ; /* 0x0000000f0a0f7210 */
/* 0x010fca0007ffe0ff */
/*0980*/ STG.E [R6.64+0x30], R15 ; /* 0x0000300f06007986 */
/* 0x0003e8000c101904 */
/*0990*/ LDG.E R10, [R4.64+0x34] ; /* 0x00003404040a7981 */
/* 0x000528000c1e1900 */
/*09a0*/ LDG.E R17, [R2.64+0x34] ; /* 0x0000340402117981 */
/* 0x008722000c1e1900 */
/*09b0*/ ISETP.GE.AND P1, PT, R9, R8, PT ; /* 0x000000080900720c */
/* 0x000fe40003f26270 */
/*09c0*/ IADD3 R12, P3, R4, 0x40, RZ ; /* 0x00000040040c7810 */
/* 0x000fc40007f7e0ff */
/*09d0*/ IADD3 R14, P2, R2, 0x40, RZ ; /* 0x00000040020e7810 */
/* 0x000fc60007f5e0ff */
/*09e0*/ IMAD.X R13, RZ, RZ, R5, P3 ; /* 0x000000ffff0d7224 */
/* 0x001fe200018e0605 */
/*09f0*/ IADD3.X R15, RZ, R3, RZ, P2, !PT ; /* 0x00000003ff0f7210 */
/* 0x002fe200017fe4ff */
/*0a00*/ IMAD.MOV.U32 R4, RZ, RZ, R12 ; /* 0x000000ffff047224 */
/* 0x004fe400078e000c */
/*0a10*/ IMAD.MOV.U32 R2, RZ, RZ, R14 ; /* 0x000000ffff027224 */
/* 0x008fe200078e000e */
/*0a20*/ MOV R5, R13 ; /* 0x0000000d00057202 */
/* 0x000fe20000000f00 */
/*0a30*/ IMAD.MOV.U32 R3, RZ, RZ, R15 ; /* 0x000000ffff037224 */
/* 0x000fe400078e000f */
/*0a40*/ IMAD.IADD R17, R10, 0x1, R17 ; /* 0x000000010a117824 */
/* 0x010fe200078e0211 */
/*0a50*/ IADD3 R10, P4, R6, 0x40, RZ ; /* 0x00000040060a7810 */
/* 0x000fc80007f9e0ff */
/*0a60*/ STG.E [R6.64+0x34], R17 ; /* 0x0000341106007986 */
/* 0x0001e2000c101904 */
/*0a70*/ IMAD.X R11, RZ, RZ, R7, P4 ; /* 0x000000ffff0b7224 */
/* 0x000fe200020e0607 */
/*0a80*/ MOV R6, R10 ; /* 0x0000000a00067202 */
/* 0x001fc60000000f00 */
/*0a90*/ IMAD.MOV.U32 R7, RZ, RZ, R11 ; /* 0x000000ffff077224 */
/* 0x000fe200078e000b */
/*0aa0*/ @!P1 BRA 0x5c0 ; /* 0xfffffb1000009947 */
/* 0x000fea000383ffff */
/*0ab0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0ac0*/ IMAD.IADD R8, R0, 0x1, -R9 ; /* 0x0000000100087824 */
/* 0x000fe200078e0a09 */
/*0ad0*/ BSSY B0, 0xde0 ; /* 0x0000030000007945 */
/* 0x000fe80003800000 */
/*0ae0*/ ISETP.GT.AND P1, PT, R8, 0x4, PT ; /* 0x000000040800780c */
/* 0x000fda0003f24270 */
/*0af0*/ @!P1 BRA 0xdd0 ; /* 0x000002d000009947 */
/* 0x000fea0003800000 */
/*0b00*/ LDG.E R8, [R4.64+-0x8] ; /* 0xfffff80404087981 */
/* 0x000ea8000c1e1900 */
/*0b10*/ LDG.E R11, [R2.64+-0x8] ; /* 0xfffff804020b7981 */
/* 0x000ea4000c1e1900 */
/*0b20*/ IADD3 R11, R8, R11, RZ ; /* 0x0000000b080b7210 */
/* 0x004fca0007ffe0ff */
/*0b30*/ STG.E [R6.64+-0x8], R11 ; /* 0xfffff80b06007986 */
/* 0x0001e8000c101904 */
/*0b40*/ LDG.E R8, [R4.64+-0x4] ; /* 0xfffffc0404087981 */
/* 0x000ea8000c1e1900 */
/*0b50*/ LDG.E R13, [R2.64+-0x4] ; /* 0xfffffc04020d7981 */
/* 0x000ea4000c1e1900 */
/*0b60*/ IMAD.IADD R13, R8, 0x1, R13 ; /* 0x00000001080d7824 */
/* 0x004fca00078e020d */
/*0b70*/ STG.E [R6.64+-0x4], R13 ; /* 0xfffffc0d06007986 */
/* 0x0003e8000c101904 */
/*0b80*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x000ea8000c1e1900 */
/*0b90*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */
/* 0x000ea4000c1e1900 */
/*0ba0*/ IMAD.IADD R15, R8, 0x1, R15 ; /* 0x00000001080f7824 */
/* 0x004fca00078e020f */
/*0bb0*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0005e8000c101904 */
/*0bc0*/ LDG.E R8, [R4.64+0x4] ; /* 0x0000040404087981 */
/* 0x000ee8000c1e1900 */
/*0bd0*/ LDG.E R17, [R2.64+0x4] ; /* 0x0000040402117981 */
/* 0x000ee4000c1e1900 */
/*0be0*/ IADD3 R17, R8, R17, RZ ; /* 0x0000001108117210 */
/* 0x008fca0007ffe0ff */
/*0bf0*/ STG.E [R6.64+0x4], R17 ; /* 0x0000041106007986 */
/* 0x0007e8000c101904 */
/*0c00*/ LDG.E R8, [R4.64+0x8] ; /* 0x0000080404087981 */
/* 0x000f28000c1e1900 */
/*0c10*/ LDG.E R11, [R2.64+0x8] ; /* 0x00000804020b7981 */
/* 0x001f24000c1e1900 */
/*0c20*/ IMAD.IADD R11, R8, 0x1, R11 ; /* 0x00000001080b7824 */
/* 0x010fca00078e020b */
/*0c30*/ STG.E [R6.64+0x8], R11 ; /* 0x0000080b06007986 */
/* 0x000fe8000c101904 */
/*0c40*/ LDG.E R8, [R4.64+0xc] ; /* 0x00000c0404087981 */
/* 0x000f28000c1e1900 */
/*0c50*/ LDG.E R13, [R2.64+0xc] ; /* 0x00000c04020d7981 */
/* 0x002f24000c1e1900 */
/*0c60*/ IMAD.IADD R13, R8, 0x1, R13 ; /* 0x00000001080d7824 */
/* 0x010fca00078e020d */
/*0c70*/ STG.E [R6.64+0xc], R13 ; /* 0x00000c0d06007986 */
/* 0x0001e8000c101904 */
/*0c80*/ LDG.E R8, [R4.64+0x10] ; /* 0x0000100404087981 */
/* 0x000f28000c1e1900 */
/*0c90*/ LDG.E R15, [R2.64+0x10] ; /* 0x00001004020f7981 */
/* 0x004f24000c1e1900 */
/*0ca0*/ IADD3 R15, R8, R15, RZ ; /* 0x0000000f080f7210 */
/* 0x010fca0007ffe0ff */
/*0cb0*/ STG.E [R6.64+0x10], R15 ; /* 0x0000100f06007986 */
/* 0x000fe8000c101904 */
/*0cc0*/ LDG.E R8, [R4.64+0x14] ; /* 0x0000140404087981 */
/* 0x0002a8000c1e1900 */
/*0cd0*/ LDG.E R17, [R2.64+0x14] ; /* 0x0000140402117981 */
/* 0x0086a2000c1e1900 */
/*0ce0*/ IADD3 R12, P1, R2, 0x20, RZ ; /* 0x00000020020c7810 */
/* 0x000fe40007f3e0ff */
/*0cf0*/ IADD3 R10, P2, R4, 0x20, RZ ; /* 0x00000020040a7810 */
/* 0x000fc40007f5e0ff */
/*0d00*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe20003f0e170 */
/*0d10*/ IMAD.X R13, RZ, RZ, R3, P1 ; /* 0x000000ffff0d7224 */
/* 0x001fe200008e0603 */
/*0d20*/ IADD3.X R5, RZ, R5, RZ, P2, !PT ; /* 0x00000005ff057210 */
/* 0x002fe200017fe4ff */
/*0d30*/ IMAD.MOV.U32 R4, RZ, RZ, R10 ; /* 0x000000ffff047224 */
/* 0x000fe200078e000a */
/*0d40*/ IADD3 R9, R9, 0x8, RZ ; /* 0x0000000809097810 */
/* 0x000fe20007ffe0ff */
/*0d50*/ IMAD.MOV.U32 R3, RZ, RZ, R13 ; /* 0x000000ffff037224 */
/* 0x008fe200078e000d */
/*0d60*/ MOV R2, R12 ; /* 0x0000000c00027202 */
/* 0x000fe20000000f00 */
/*0d70*/ IMAD.IADD R17, R8, 0x1, R17 ; /* 0x0000000108117824 */
/* 0x004fe200078e0211 */
/*0d80*/ IADD3 R8, P3, R6, 0x20, RZ ; /* 0x0000002006087810 */
/* 0x000fc80007f7e0ff */
/*0d90*/ STG.E [R6.64+0x14], R17 ; /* 0x0000141106007986 */
/* 0x0001e2000c101904 */
/*0da0*/ IMAD.X R11, RZ, RZ, R7, P3 ; /* 0x000000ffff0b7224 */
/* 0x000fe400018e0607 */
/*0db0*/ IMAD.MOV.U32 R6, RZ, RZ, R8 ; /* 0x000000ffff067224 */
/* 0x001fc600078e0008 */
/*0dc0*/ MOV R7, R11 ; /* 0x0000000b00077202 */
/* 0x000fe40000000f00 */
/*0dd0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0de0*/ ISETP.LT.OR P0, PT, R9, R0, P0 ; /* 0x000000000900720c */
/* 0x000fda0000701670 */
/*0df0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0e00*/ LDG.E R0, [R4.64+-0x8] ; /* 0xfffff80404007981 */
/* 0x000ea8000c1e1900 */
/*0e10*/ LDG.E R9, [R2.64+-0x8] ; /* 0xfffff80402097981 */
/* 0x000ea4000c1e1900 */
/*0e20*/ IMAD.IADD R9, R0, 0x1, R9 ; /* 0x0000000100097824 */
/* 0x004fca00078e0209 */
/*0e30*/ STG.E [R6.64+-0x8], R9 ; /* 0xfffff80906007986 */
/* 0x000fe8000c101904 */
/*0e40*/ LDG.E R0, [R4.64+-0x4] ; /* 0xfffffc0404007981 */
/* 0x000ea8000c1e1900 */
/*0e50*/ LDG.E R11, [R2.64+-0x4] ; /* 0xfffffc04020b7981 */
/* 0x000ea4000c1e1900 */
/*0e60*/ IMAD.IADD R11, R0, 0x1, R11 ; /* 0x00000001000b7824 */
/* 0x004fca00078e020b */
/*0e70*/ STG.E [R6.64+-0x4], R11 ; /* 0xfffffc0b06007986 */
/* 0x000fe8000c101904 */
/*0e80*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x000ea8000c1e1900 */
/*0e90*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */
/* 0x000ea4000c1e1900 */
/*0ea0*/ IADD3 R13, R0, R13, RZ ; /* 0x0000000d000d7210 */
/* 0x004fca0007ffe0ff */
/*0eb0*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x000fe8000c101904 */
/*0ec0*/ LDG.E R0, [R4.64+0x4] ; /* 0x0000040404007981 */
/* 0x000ea8000c1e1900 */
/*0ed0*/ LDG.E R15, [R2.64+0x4] ; /* 0x00000404020f7981 */
/* 0x000ea4000c1e1900 */
/*0ee0*/ IMAD.IADD R15, R0, 0x1, R15 ; /* 0x00000001000f7824 */
/* 0x004fca00078e020f */
/*0ef0*/ STG.E [R6.64+0x4], R15 ; /* 0x0000040f06007986 */
/* 0x000fe2000c101904 */
/*0f00*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0f10*/ BRA 0xf10; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0f20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fa0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fe0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ff0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
/*
Vector addition with a single thread for each addition
*/
/*
Vector addition with thread mapping and thread accessing its neighbor parallely
*/
//slower than simpler
/*
Matrix Matrix multiplication with a single thread for each row
*/
/*
Matrix Matrix multiplication with a single thread for each result element
*/
/*
Matrix Vector multiplication with a block with 4 threads per block, shared block mem and parallel reduce
*/
__global__ void good_addition(int *a, int *b, int *c, int len)
{
int tid= threadIdx.x + blockIdx.x * blockDim.x;
const int thread_count= blockDim.x*gridDim.x;
int step = len/thread_count;
int start_index = tid*step;
int end_index= (tid+1)* step;
if (tid==thread_count-1) end_index=len;
//printf("Step is %d\n",step);
while(start_index< end_index)
{
c[start_index]=a[start_index]+b[start_index];
//printf("I am block: %d with tid: %d Result %d \n",blockIdx.x,tid,c[tid]);
start_index +=1;
}
} | .file "tmpxft_0006a0f4_00000000-6_good_addition.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z38__device_stub__Z13good_additionPiS_S_iPiS_S_i
.type _Z38__device_stub__Z13good_additionPiS_S_iPiS_S_i, @function
_Z38__device_stub__Z13good_additionPiS_S_iPiS_S_i:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13good_additionPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z38__device_stub__Z13good_additionPiS_S_iPiS_S_i, .-_Z38__device_stub__Z13good_additionPiS_S_iPiS_S_i
.globl _Z13good_additionPiS_S_i
.type _Z13good_additionPiS_S_i, @function
_Z13good_additionPiS_S_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z13good_additionPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z13good_additionPiS_S_i, .-_Z13good_additionPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13good_additionPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13good_additionPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
/*
Vector addition with a single thread for each addition
*/
/*
Vector addition with thread mapping and thread accessing its neighbor parallely
*/
//slower than simpler
/*
Matrix Matrix multiplication with a single thread for each row
*/
/*
Matrix Matrix multiplication with a single thread for each result element
*/
/*
Matrix Vector multiplication with a block with 4 threads per block, shared block mem and parallel reduce
*/
__global__ void good_addition(int *a, int *b, int *c, int len)
{
int tid= threadIdx.x + blockIdx.x * blockDim.x;
const int thread_count= blockDim.x*gridDim.x;
int step = len/thread_count;
int start_index = tid*step;
int end_index= (tid+1)* step;
if (tid==thread_count-1) end_index=len;
//printf("Step is %d\n",step);
while(start_index< end_index)
{
c[start_index]=a[start_index]+b[start_index];
//printf("I am block: %d with tid: %d Result %d \n",blockIdx.x,tid,c[tid]);
start_index +=1;
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
/*
Vector addition with a single thread for each addition
*/
/*
Vector addition with thread mapping and thread accessing its neighbor parallely
*/
//slower than simpler
/*
Matrix Matrix multiplication with a single thread for each row
*/
/*
Matrix Matrix multiplication with a single thread for each result element
*/
/*
Matrix Vector multiplication with a block with 4 threads per block, shared block mem and parallel reduce
*/
__global__ void good_addition(int *a, int *b, int *c, int len)
{
int tid= threadIdx.x + blockIdx.x * blockDim.x;
const int thread_count= blockDim.x*gridDim.x;
int step = len/thread_count;
int start_index = tid*step;
int end_index= (tid+1)* step;
if (tid==thread_count-1) end_index=len;
//printf("Step is %d\n",step);
while(start_index< end_index)
{
c[start_index]=a[start_index]+b[start_index];
//printf("I am block: %d with tid: %d Result %d \n",blockIdx.x,tid,c[tid]);
start_index +=1;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/*
Vector addition with a single thread for each addition
*/
/*
Vector addition with thread mapping and thread accessing its neighbor parallely
*/
//slower than simpler
/*
Matrix Matrix multiplication with a single thread for each row
*/
/*
Matrix Matrix multiplication with a single thread for each result element
*/
/*
Matrix Vector multiplication with a block with 4 threads per block, shared block mem and parallel reduce
*/
__global__ void good_addition(int *a, int *b, int *c, int len)
{
int tid= threadIdx.x + blockIdx.x * blockDim.x;
const int thread_count= blockDim.x*gridDim.x;
int step = len/thread_count;
int start_index = tid*step;
int end_index= (tid+1)* step;
if (tid==thread_count-1) end_index=len;
//printf("Step is %d\n",step);
while(start_index< end_index)
{
c[start_index]=a[start_index]+b[start_index];
//printf("I am block: %d with tid: %d Result %d \n",blockIdx.x,tid,c[tid]);
start_index +=1;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13good_additionPiS_S_i
.globl _Z13good_additionPiS_S_i
.p2align 8
.type _Z13good_additionPiS_S_i,@function
_Z13good_additionPiS_S_i:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x20
s_load_b32 s6, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_ashr_i32 s9, s6, 31
s_mul_i32 s3, s3, s2
s_add_i32 s10, s6, s9
s_ashr_i32 s4, s3, 31
s_xor_b32 s10, s10, s9
s_add_i32 s5, s3, s4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
s_xor_b32 s5, s5, s4
s_xor_b32 s4, s9, s4
v_cvt_f32_u32_e32 v1, s5
s_sub_i32 s8, 0, s5
v_rcp_iflag_f32_e32 v1, v1
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v1, v1
v_readfirstlane_b32 s7, v1
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s8, s8, s7
s_mul_hi_u32 s8, s7, s8
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s7, s7, s8
s_mul_hi_u32 s7, s10, s7
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_mul_i32 s8, s7, s5
s_add_i32 s9, s7, 1
s_sub_i32 s8, s10, s8
s_sub_i32 s10, s8, s5
s_cmp_ge_u32 s8, s5
s_cselect_b32 s2, s9, s7
s_cselect_b32 s7, s10, s8
s_add_i32 s8, s2, 1
s_cmp_ge_u32 s7, s5
s_cselect_b32 s2, s8, s2
s_add_i32 s3, s3, -1
s_xor_b32 s2, s2, s4
v_cmp_eq_u32_e32 vcc_lo, s3, v1
s_sub_i32 s2, s2, s4
s_mov_b32 s3, exec_lo
v_mul_lo_u32 v0, v1, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, s2, v0
s_mov_b32 s2, 0
v_cndmask_b32_e64 v7, v2, s6, vcc_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_lt_i32_e64 v0, v7
s_cbranch_execz .LBB0_3
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[5:6], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s4, v5
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v6, vcc_lo
v_add_co_u32 v3, vcc_lo, s6, v5
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v6, vcc_lo
v_add_co_u32 v5, vcc_lo, s0, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s1, v6, vcc_lo
.p2align 6
.LBB0_2:
global_load_b32 v8, v[1:2], off
global_load_b32 v9, v[3:4], off
v_add_co_u32 v1, vcc_lo, v1, 4
v_add_nc_u32_e32 v0, 1, v0
v_add_co_ci_u32_e32 v2, vcc_lo, 0, v2, vcc_lo
v_add_co_u32 v3, vcc_lo, v3, 4
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_cmp_ge_i32_e32 vcc_lo, v0, v7
s_or_b32 s2, vcc_lo, s2
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v8, v9, v8
global_store_b32 v[5:6], v8, off
v_add_co_u32 v5, s0, v5, 4
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v6, s0, 0, v6, s0
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13good_additionPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13good_additionPiS_S_i, .Lfunc_end0-_Z13good_additionPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13good_additionPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13good_additionPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/*
Vector addition with a single thread for each addition
*/
/*
Vector addition with thread mapping and thread accessing its neighbor parallely
*/
//slower than simpler
/*
Matrix Matrix multiplication with a single thread for each row
*/
/*
Matrix Matrix multiplication with a single thread for each result element
*/
/*
Matrix Vector multiplication with a block with 4 threads per block, shared block mem and parallel reduce
*/
__global__ void good_addition(int *a, int *b, int *c, int len)
{
int tid= threadIdx.x + blockIdx.x * blockDim.x;
const int thread_count= blockDim.x*gridDim.x;
int step = len/thread_count;
int start_index = tid*step;
int end_index= (tid+1)* step;
if (tid==thread_count-1) end_index=len;
//printf("Step is %d\n",step);
while(start_index< end_index)
{
c[start_index]=a[start_index]+b[start_index];
//printf("I am block: %d with tid: %d Result %d \n",blockIdx.x,tid,c[tid]);
start_index +=1;
}
} | .text
.file "good_addition.hip"
.globl _Z28__device_stub__good_additionPiS_S_i # -- Begin function _Z28__device_stub__good_additionPiS_S_i
.p2align 4, 0x90
.type _Z28__device_stub__good_additionPiS_S_i,@function
_Z28__device_stub__good_additionPiS_S_i: # @_Z28__device_stub__good_additionPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13good_additionPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z28__device_stub__good_additionPiS_S_i, .Lfunc_end0-_Z28__device_stub__good_additionPiS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13good_additionPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13good_additionPiS_S_i,@object # @_Z13good_additionPiS_S_i
.section .rodata,"a",@progbits
.globl _Z13good_additionPiS_S_i
.p2align 3, 0x0
_Z13good_additionPiS_S_i:
.quad _Z28__device_stub__good_additionPiS_S_i
.size _Z13good_additionPiS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13good_additionPiS_S_i"
.size .L__unnamed_1, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__good_additionPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13good_additionPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z13good_additionPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff047624 */
/* 0x000fe200078e00ff */
/*0020*/ IABS R8, c[0x0][0x178] ; /* 0x00005e0000087a13 */
/* 0x000fc60000000000 */
/*0030*/ IMAD R4, R4, c[0x0][0xc], RZ ; /* 0x0000030004047a24 */
/* 0x000fca00078e02ff */
/*0040*/ IABS R5, R4.reuse ; /* 0x0000000400057213 */
/* 0x080fe40000000000 */
/*0050*/ IABS R9, R4 ; /* 0x0000000400097213 */
/* 0x000fe40000000000 */
/*0060*/ I2F.RP R0, R5 ; /* 0x0000000500007306 */
/* 0x000e300000209400 */
/*0070*/ MUFU.RCP R0, R0 ; /* 0x0000000000007308 */
/* 0x001e240000001000 */
/*0080*/ IADD3 R2, R0, 0xffffffe, RZ ; /* 0x0ffffffe00027810 */
/* 0x001fe20007ffe0ff */
/*0090*/ IMAD.MOV R0, RZ, RZ, -R9 ; /* 0x000000ffff007224 */
/* 0x000fca00078e0a09 */
/*00a0*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*00b0*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x001fe400078e00ff */
/*00c0*/ IMAD.MOV R6, RZ, RZ, -R3 ; /* 0x000000ffff067224 */
/* 0x002fc800078e0a03 */
/*00d0*/ IMAD R7, R6, R5, RZ ; /* 0x0000000506077224 */
/* 0x000fe200078e02ff */
/*00e0*/ MOV R6, R8 ; /* 0x0000000800067202 */
/* 0x000fc60000000f00 */
/*00f0*/ IMAD.HI.U32 R3, R3, R7, R2 ; /* 0x0000000703037227 */
/* 0x000fe200078e0002 */
/*0100*/ LOP3.LUT R2, R4, c[0x0][0x178], RZ, 0x3c, !PT ; /* 0x00005e0004027a12 */
/* 0x000fc800078e3cff */
/*0110*/ ISETP.GE.AND P2, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe20003f46270 */
/*0120*/ IMAD.HI.U32 R3, R3, R6, RZ ; /* 0x0000000603037227 */
/* 0x000fc800078e00ff */
/*0130*/ IMAD R0, R3, R0, R6 ; /* 0x0000000003007224 */
/* 0x000fca00078e0206 */
/*0140*/ ISETP.GT.U32.AND P1, PT, R5, R0, PT ; /* 0x000000000500720c */
/* 0x000fda0003f24070 */
/*0150*/ @!P1 IMAD.IADD R0, R0, 0x1, -R5 ; /* 0x0000000100009824 */
/* 0x000fe200078e0a05 */
/*0160*/ @!P1 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103039810 */
/* 0x000fe40007ffe0ff */
/*0170*/ ISETP.NE.AND P1, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe40003f25270 */
/*0180*/ ISETP.GE.U32.AND P0, PT, R0, R5, PT ; /* 0x000000050000720c */
/* 0x000fe40003f06070 */
/*0190*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*01a0*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e2e0000002100 */
/*01b0*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */
/* 0x000fc80007ffe0ff */
/*01c0*/ @!P2 IADD3 R3, -R3, RZ, RZ ; /* 0x000000ff0303a210 */
/* 0x000fe40007ffe1ff */
/*01d0*/ @!P1 LOP3.LUT R3, RZ, R4, RZ, 0x33, !PT ; /* 0x00000004ff039212 */
/* 0x000fe200078e33ff */
/*01e0*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x001fe200078e0205 */
/*01f0*/ IADD3 R5, R4, -0x1, RZ ; /* 0xffffffff04057810 */
/* 0x000fc60007ffe0ff */
/*0200*/ IMAD R9, R0.reuse, R3, RZ ; /* 0x0000000300097224 */
/* 0x040fe200078e02ff */
/*0210*/ ISETP.NE.AND P0, PT, R0, R5, PT ; /* 0x000000050000720c */
/* 0x000fc60003f05270 */
/*0220*/ IMAD.IADD R0, R3, 0x1, R9 ; /* 0x0000000103007824 */
/* 0x000fca00078e0209 */
/*0230*/ SEL R0, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a07 */
/* 0x000fc80000000000 */
/*0240*/ ISETP.GT.AND P0, PT, R0, R9, PT ; /* 0x000000090000720c */
/* 0x000fda0003f04270 */
/*0250*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0260*/ IMAD.IADD R2, R0, 0x1, -R9 ; /* 0x0000000100027824 */
/* 0x000fe200078e0a09 */
/*0270*/ LOP3.LUT R3, RZ, R9, RZ, 0x33, !PT ; /* 0x00000009ff037212 */
/* 0x000fe200078e33ff */
/*0280*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0290*/ BSSY B0, 0x4c0 ; /* 0x0000022000007945 */
/* 0x000fe40003800000 */
/*02a0*/ LOP3.LUT P1, R8, R2, 0x3, RZ, 0xc0, !PT ; /* 0x0000000302087812 */
/* 0x000fe2000782c0ff */
/*02b0*/ IMAD.IADD R3, R0, 0x1, R3 ; /* 0x0000000100037824 */
/* 0x000fca00078e0203 */
/*02c0*/ ISETP.GE.U32.AND P0, PT, R3, 0x3, PT ; /* 0x000000030300780c */
/* 0x000fce0003f06070 */
/*02d0*/ @!P1 BRA 0x4b0 ; /* 0x000001d000009947 */
/* 0x000fea0003800000 */
/*02e0*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */
/* 0x000fd400000001ff */
/*02f0*/ IMAD.WIDE R2, R9, R6, c[0x0][0x170] ; /* 0x00005c0009027625 */
/* 0x000fc800078e0206 */
/*0300*/ IMAD.WIDE R4, R9, R6, c[0x0][0x168] ; /* 0x00005a0009047625 */
/* 0x000fc800078e0206 */
/*0310*/ IMAD.WIDE R6, R9, R6, c[0x0][0x160] ; /* 0x0000580009067625 */
/* 0x000fe200078e0206 */
/*0320*/ MOV R13, R5 ; /* 0x00000005000d7202 */
/* 0x000fc60000000f00 */
/*0330*/ IMAD.MOV.U32 R12, RZ, RZ, R2 ; /* 0x000000ffff0c7224 */
/* 0x000fe400078e0002 */
/*0340*/ IMAD.MOV.U32 R15, RZ, RZ, R3 ; /* 0x000000ffff0f7224 */
/* 0x000fe400078e0003 */
/*0350*/ IMAD.MOV.U32 R10, RZ, RZ, R4 ; /* 0x000000ffff0a7224 */
/* 0x000fe400078e0004 */
/*0360*/ IMAD.MOV.U32 R11, RZ, RZ, R7 ; /* 0x000000ffff0b7224 */
/* 0x000fe400078e0007 */
/*0370*/ IMAD.MOV.U32 R2, RZ, RZ, R10 ; /* 0x000000ffff027224 */
/* 0x001fe200078e000a */
/*0380*/ MOV R4, R6 ; /* 0x0000000600047202 */
/* 0x000fe20000000f00 */
/*0390*/ IMAD.MOV.U32 R5, RZ, RZ, R11 ; /* 0x000000ffff057224 */
/* 0x000fe400078e000b */
/*03a0*/ IMAD.MOV.U32 R3, RZ, RZ, R13 ; /* 0x000000ffff037224 */
/* 0x000fc800078e000d */
/*03b0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea8000c1e1900 */
/*03c0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x0000a2000c1e1900 */
/*03d0*/ IADD3 R8, R8, -0x1, RZ ; /* 0xffffffff08087810 */
/* 0x000fe40007ffe0ff */
/*03e0*/ MOV R3, R15 ; /* 0x0000000f00037202 */
/* 0x001fe40000000f00 */
/*03f0*/ ISETP.NE.AND P1, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f25270 */
/*0400*/ IADD3 R10, P3, R10, 0x4, RZ ; /* 0x000000040a0a7810 */
/* 0x000fc40007f7e0ff */
/*0410*/ IADD3 R6, P4, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x000fe40007f9e0ff */
/*0420*/ IADD3 R9, R9, 0x1, RZ ; /* 0x0000000109097810 */
/* 0x000fe20007ffe0ff */
/*0430*/ IMAD.X R13, RZ, RZ, R13, P3 ; /* 0x000000ffff0d7224 */
/* 0x000fe400018e060d */
/*0440*/ IMAD.X R11, RZ, RZ, R11, P4 ; /* 0x000000ffff0b7224 */
/* 0x000fe400020e060b */
/*0450*/ IMAD.IADD R7, R2, 0x1, R5 ; /* 0x0000000102077824 */
/* 0x004fe400078e0205 */
/*0460*/ IMAD.MOV.U32 R2, RZ, RZ, R12 ; /* 0x000000ffff027224 */
/* 0x000fca00078e000c */
/*0470*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x0001e2000c101904 */
/*0480*/ IADD3 R12, P2, R12, 0x4, RZ ; /* 0x000000040c0c7810 */
/* 0x000fca0007f5e0ff */
/*0490*/ IMAD.X R15, RZ, RZ, R15, P2 ; /* 0x000000ffff0f7224 */
/* 0x000fe200010e060f */
/*04a0*/ @P1 BRA 0x370 ; /* 0xfffffec000001947 */
/* 0x000fea000383ffff */
/*04b0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*04c0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*04d0*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */
/* 0x000fe200000001ff */
/*04e0*/ IMAD.IADD R3, R0, 0x1, -R9 ; /* 0x0000000100037824 */
/* 0x001fe200078e0a09 */
/*04f0*/ BSSY B0, 0xac0 ; /* 0x000005c000007945 */
/* 0x000fe80003800000 */
/*0500*/ ISETP.GT.AND P1, PT, R3, 0xc, PT ; /* 0x0000000c0300780c */
/* 0x000fc80003f24270 */
/*0510*/ IMAD.WIDE R6, R9, R6, c[0x2][0x0] ; /* 0x0080000009067625 */
/* 0x000fca00078e0206 */
/*0520*/ IADD3 R2, P0, R6.reuse, c[0x0][0x160], RZ ; /* 0x0000580006027a10 */
/* 0x040fe40007f1e0ff */
/*0530*/ IADD3 R4, P2, R6.reuse, c[0x0][0x168], RZ ; /* 0x00005a0006047a10 */
/* 0x040fe40007f5e0ff */
/*0540*/ IADD3 R6, P3, R6, c[0x0][0x170], RZ ; /* 0x00005c0006067a10 */
/* 0x000fe40007f7e0ff */
/*0550*/ IADD3.X R3, R7.reuse, c[0x0][0x164], RZ, P0, !PT ; /* 0x0000590007037a10 */
/* 0x040fe400007fe4ff */
/*0560*/ IADD3.X R5, R7.reuse, c[0x0][0x16c], RZ, P2, !PT ; /* 0x00005b0007057a10 */
/* 0x040fe400017fe4ff */
/*0570*/ IADD3.X R7, R7, c[0x0][0x174], RZ, P3, !PT ; /* 0x00005d0007077a10 */
/* 0x000fc40001ffe4ff */
/*0580*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fe20003f0f070 */
/*0590*/ @!P1 BRA 0xab0 ; /* 0x0000051000009947 */
/* 0x000fee0003800000 */
/*05a0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*05b0*/ IADD3 R8, R0, -0xc, RZ ; /* 0xfffffff400087810 */
/* 0x000fc60007ffe0ff */
/*05c0*/ LDG.E R10, [R4.64+-0x8] ; /* 0xfffff804040a7981 */
/* 0x000ea8000c1e1900 */
/*05d0*/ LDG.E R11, [R2.64+-0x8] ; /* 0xfffff804020b7981 */
/* 0x000ea4000c1e1900 */
/*05e0*/ IMAD.IADD R11, R10, 0x1, R11 ; /* 0x000000010a0b7824 */
/* 0x004fca00078e020b */
/*05f0*/ STG.E [R6.64+-0x8], R11 ; /* 0xfffff80b06007986 */
/* 0x0001e8000c101904 */
/*0600*/ LDG.E R10, [R4.64+-0x4] ; /* 0xfffffc04040a7981 */
/* 0x000ea8000c1e1900 */
/*0610*/ LDG.E R13, [R2.64+-0x4] ; /* 0xfffffc04020d7981 */
/* 0x000ea4000c1e1900 */
/*0620*/ IMAD.IADD R13, R10, 0x1, R13 ; /* 0x000000010a0d7824 */
/* 0x004fca00078e020d */
/*0630*/ STG.E [R6.64+-0x4], R13 ; /* 0xfffffc0d06007986 */
/* 0x0003e8000c101904 */
/*0640*/ LDG.E R10, [R4.64] ; /* 0x00000004040a7981 */
/* 0x000ea8000c1e1900 */
/*0650*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */
/* 0x000ea4000c1e1900 */
/*0660*/ IADD3 R15, R10, R15, RZ ; /* 0x0000000f0a0f7210 */
/* 0x004fca0007ffe0ff */
/*0670*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0005e8000c101904 */
/*0680*/ LDG.E R10, [R4.64+0x4] ; /* 0x00000404040a7981 */
/* 0x000ee8000c1e1900 */
/*0690*/ LDG.E R17, [R2.64+0x4] ; /* 0x0000040402117981 */
/* 0x000ee4000c1e1900 */
/*06a0*/ IMAD.IADD R17, R10, 0x1, R17 ; /* 0x000000010a117824 */
/* 0x008fca00078e0211 */
/*06b0*/ STG.E [R6.64+0x4], R17 ; /* 0x0000041106007986 */
/* 0x0007e8000c101904 */
/*06c0*/ LDG.E R10, [R4.64+0x8] ; /* 0x00000804040a7981 */
/* 0x000f28000c1e1900 */
/*06d0*/ LDG.E R11, [R2.64+0x8] ; /* 0x00000804020b7981 */
/* 0x001f24000c1e1900 */
/*06e0*/ IMAD.IADD R11, R10, 0x1, R11 ; /* 0x000000010a0b7824 */
/* 0x010fca00078e020b */
/*06f0*/ STG.E [R6.64+0x8], R11 ; /* 0x0000080b06007986 */
/* 0x0001e8000c101904 */
/*0700*/ LDG.E R10, [R4.64+0xc] ; /* 0x00000c04040a7981 */
/* 0x000f28000c1e1900 */
/*0710*/ LDG.E R13, [R2.64+0xc] ; /* 0x00000c04020d7981 */
/* 0x002f24000c1e1900 */
/*0720*/ IMAD.IADD R13, R10, 0x1, R13 ; /* 0x000000010a0d7824 */
/* 0x010fca00078e020d */
/*0730*/ STG.E [R6.64+0xc], R13 ; /* 0x00000c0d06007986 */
/* 0x0003e8000c101904 */
/*0740*/ LDG.E R10, [R4.64+0x10] ; /* 0x00001004040a7981 */
/* 0x000f28000c1e1900 */
/*0750*/ LDG.E R15, [R2.64+0x10] ; /* 0x00001004020f7981 */
/* 0x004f24000c1e1900 */
/*0760*/ IADD3 R15, R10, R15, RZ ; /* 0x0000000f0a0f7210 */
/* 0x010fca0007ffe0ff */
/*0770*/ STG.E [R6.64+0x10], R15 ; /* 0x0000100f06007986 */
/* 0x0005e8000c101904 */
/*0780*/ LDG.E R10, [R4.64+0x14] ; /* 0x00001404040a7981 */
/* 0x000f28000c1e1900 */
/*0790*/ LDG.E R17, [R2.64+0x14] ; /* 0x0000140402117981 */
/* 0x008f24000c1e1900 */
/*07a0*/ IMAD.IADD R17, R10, 0x1, R17 ; /* 0x000000010a117824 */
/* 0x010fca00078e0211 */
/*07b0*/ STG.E [R6.64+0x14], R17 ; /* 0x0000141106007986 */
/* 0x0007e8000c101904 */
/*07c0*/ LDG.E R10, [R4.64+0x18] ; /* 0x00001804040a7981 */
/* 0x000f28000c1e1900 */
/*07d0*/ LDG.E R11, [R2.64+0x18] ; /* 0x00001804020b7981 */
/* 0x001f24000c1e1900 */
/*07e0*/ IMAD.IADD R11, R10, 0x1, R11 ; /* 0x000000010a0b7824 */
/* 0x010fca00078e020b */
/*07f0*/ STG.E [R6.64+0x18], R11 ; /* 0x0000180b06007986 */
/* 0x0001e8000c101904 */
/*0800*/ LDG.E R10, [R4.64+0x1c] ; /* 0x00001c04040a7981 */
/* 0x000f28000c1e1900 */
/*0810*/ LDG.E R13, [R2.64+0x1c] ; /* 0x00001c04020d7981 */
/* 0x002f24000c1e1900 */
/*0820*/ IMAD.IADD R13, R10, 0x1, R13 ; /* 0x000000010a0d7824 */
/* 0x010fca00078e020d */
/*0830*/ STG.E [R6.64+0x1c], R13 ; /* 0x00001c0d06007986 */
/* 0x0003e8000c101904 */
/*0840*/ LDG.E R10, [R4.64+0x20] ; /* 0x00002004040a7981 */
/* 0x000f28000c1e1900 */
/*0850*/ LDG.E R15, [R2.64+0x20] ; /* 0x00002004020f7981 */
/* 0x004f24000c1e1900 */
/*0860*/ IADD3 R15, R10, R15, RZ ; /* 0x0000000f0a0f7210 */
/* 0x010fca0007ffe0ff */
/*0870*/ STG.E [R6.64+0x20], R15 ; /* 0x0000200f06007986 */
/* 0x0005e8000c101904 */
/*0880*/ LDG.E R10, [R4.64+0x24] ; /* 0x00002404040a7981 */
/* 0x000f28000c1e1900 */
/*0890*/ LDG.E R17, [R2.64+0x24] ; /* 0x0000240402117981 */
/* 0x008f24000c1e1900 */
/*08a0*/ IMAD.IADD R17, R10, 0x1, R17 ; /* 0x000000010a117824 */
/* 0x010fca00078e0211 */
/*08b0*/ STG.E [R6.64+0x24], R17 ; /* 0x0000241106007986 */
/* 0x0007e8000c101904 */
/*08c0*/ LDG.E R10, [R4.64+0x28] ; /* 0x00002804040a7981 */
/* 0x000f28000c1e1900 */
/*08d0*/ LDG.E R11, [R2.64+0x28] ; /* 0x00002804020b7981 */
/* 0x001f24000c1e1900 */
/*08e0*/ IMAD.IADD R11, R10, 0x1, R11 ; /* 0x000000010a0b7824 */
/* 0x010fca00078e020b */
/*08f0*/ STG.E [R6.64+0x28], R11 ; /* 0x0000280b06007986 */
/* 0x000fe8000c101904 */
/*0900*/ LDG.E R10, [R4.64+0x2c] ; /* 0x00002c04040a7981 */
/* 0x000f28000c1e1900 */
/*0910*/ LDG.E R13, [R2.64+0x2c] ; /* 0x00002c04020d7981 */
/* 0x002f24000c1e1900 */
/*0920*/ IMAD.IADD R13, R10, 0x1, R13 ; /* 0x000000010a0d7824 */
/* 0x010fca00078e020d */
/*0930*/ STG.E [R6.64+0x2c], R13 ; /* 0x00002c0d06007986 */
/* 0x0001e8000c101904 */
/*0940*/ LDG.E R10, [R4.64+0x30] ; /* 0x00003004040a7981 */
/* 0x000f28000c1e1900 */
/*0950*/ LDG.E R15, [R2.64+0x30] ; /* 0x00003004020f7981 */
/* 0x004f22000c1e1900 */
/*0960*/ IADD3 R9, R9, 0x10, RZ ; /* 0x0000001009097810 */
/* 0x000fe40007ffe0ff */
/*0970*/ IADD3 R15, R10, R15, RZ ; /* 0x0000000f0a0f7210 */
/* 0x010fca0007ffe0ff */
/*0980*/ STG.E [R6.64+0x30], R15 ; /* 0x0000300f06007986 */
/* 0x0003e8000c101904 */
/*0990*/ LDG.E R10, [R4.64+0x34] ; /* 0x00003404040a7981 */
/* 0x000528000c1e1900 */
/*09a0*/ LDG.E R17, [R2.64+0x34] ; /* 0x0000340402117981 */
/* 0x008722000c1e1900 */
/*09b0*/ ISETP.GE.AND P1, PT, R9, R8, PT ; /* 0x000000080900720c */
/* 0x000fe40003f26270 */
/*09c0*/ IADD3 R12, P3, R4, 0x40, RZ ; /* 0x00000040040c7810 */
/* 0x000fc40007f7e0ff */
/*09d0*/ IADD3 R14, P2, R2, 0x40, RZ ; /* 0x00000040020e7810 */
/* 0x000fc60007f5e0ff */
/*09e0*/ IMAD.X R13, RZ, RZ, R5, P3 ; /* 0x000000ffff0d7224 */
/* 0x001fe200018e0605 */
/*09f0*/ IADD3.X R15, RZ, R3, RZ, P2, !PT ; /* 0x00000003ff0f7210 */
/* 0x002fe200017fe4ff */
/*0a00*/ IMAD.MOV.U32 R4, RZ, RZ, R12 ; /* 0x000000ffff047224 */
/* 0x004fe400078e000c */
/*0a10*/ IMAD.MOV.U32 R2, RZ, RZ, R14 ; /* 0x000000ffff027224 */
/* 0x008fe200078e000e */
/*0a20*/ MOV R5, R13 ; /* 0x0000000d00057202 */
/* 0x000fe20000000f00 */
/*0a30*/ IMAD.MOV.U32 R3, RZ, RZ, R15 ; /* 0x000000ffff037224 */
/* 0x000fe400078e000f */
/*0a40*/ IMAD.IADD R17, R10, 0x1, R17 ; /* 0x000000010a117824 */
/* 0x010fe200078e0211 */
/*0a50*/ IADD3 R10, P4, R6, 0x40, RZ ; /* 0x00000040060a7810 */
/* 0x000fc80007f9e0ff */
/*0a60*/ STG.E [R6.64+0x34], R17 ; /* 0x0000341106007986 */
/* 0x0001e2000c101904 */
/*0a70*/ IMAD.X R11, RZ, RZ, R7, P4 ; /* 0x000000ffff0b7224 */
/* 0x000fe200020e0607 */
/*0a80*/ MOV R6, R10 ; /* 0x0000000a00067202 */
/* 0x001fc60000000f00 */
/*0a90*/ IMAD.MOV.U32 R7, RZ, RZ, R11 ; /* 0x000000ffff077224 */
/* 0x000fe200078e000b */
/*0aa0*/ @!P1 BRA 0x5c0 ; /* 0xfffffb1000009947 */
/* 0x000fea000383ffff */
/*0ab0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0ac0*/ IMAD.IADD R8, R0, 0x1, -R9 ; /* 0x0000000100087824 */
/* 0x000fe200078e0a09 */
/*0ad0*/ BSSY B0, 0xde0 ; /* 0x0000030000007945 */
/* 0x000fe80003800000 */
/*0ae0*/ ISETP.GT.AND P1, PT, R8, 0x4, PT ; /* 0x000000040800780c */
/* 0x000fda0003f24270 */
/*0af0*/ @!P1 BRA 0xdd0 ; /* 0x000002d000009947 */
/* 0x000fea0003800000 */
/*0b00*/ LDG.E R8, [R4.64+-0x8] ; /* 0xfffff80404087981 */
/* 0x000ea8000c1e1900 */
/*0b10*/ LDG.E R11, [R2.64+-0x8] ; /* 0xfffff804020b7981 */
/* 0x000ea4000c1e1900 */
/*0b20*/ IADD3 R11, R8, R11, RZ ; /* 0x0000000b080b7210 */
/* 0x004fca0007ffe0ff */
/*0b30*/ STG.E [R6.64+-0x8], R11 ; /* 0xfffff80b06007986 */
/* 0x0001e8000c101904 */
/*0b40*/ LDG.E R8, [R4.64+-0x4] ; /* 0xfffffc0404087981 */
/* 0x000ea8000c1e1900 */
/*0b50*/ LDG.E R13, [R2.64+-0x4] ; /* 0xfffffc04020d7981 */
/* 0x000ea4000c1e1900 */
/*0b60*/ IMAD.IADD R13, R8, 0x1, R13 ; /* 0x00000001080d7824 */
/* 0x004fca00078e020d */
/*0b70*/ STG.E [R6.64+-0x4], R13 ; /* 0xfffffc0d06007986 */
/* 0x0003e8000c101904 */
/*0b80*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x000ea8000c1e1900 */
/*0b90*/ LDG.E R15, [R2.64] ; /* 0x00000004020f7981 */
/* 0x000ea4000c1e1900 */
/*0ba0*/ IMAD.IADD R15, R8, 0x1, R15 ; /* 0x00000001080f7824 */
/* 0x004fca00078e020f */
/*0bb0*/ STG.E [R6.64], R15 ; /* 0x0000000f06007986 */
/* 0x0005e8000c101904 */
/*0bc0*/ LDG.E R8, [R4.64+0x4] ; /* 0x0000040404087981 */
/* 0x000ee8000c1e1900 */
/*0bd0*/ LDG.E R17, [R2.64+0x4] ; /* 0x0000040402117981 */
/* 0x000ee4000c1e1900 */
/*0be0*/ IADD3 R17, R8, R17, RZ ; /* 0x0000001108117210 */
/* 0x008fca0007ffe0ff */
/*0bf0*/ STG.E [R6.64+0x4], R17 ; /* 0x0000041106007986 */
/* 0x0007e8000c101904 */
/*0c00*/ LDG.E R8, [R4.64+0x8] ; /* 0x0000080404087981 */
/* 0x000f28000c1e1900 */
/*0c10*/ LDG.E R11, [R2.64+0x8] ; /* 0x00000804020b7981 */
/* 0x001f24000c1e1900 */
/*0c20*/ IMAD.IADD R11, R8, 0x1, R11 ; /* 0x00000001080b7824 */
/* 0x010fca00078e020b */
/*0c30*/ STG.E [R6.64+0x8], R11 ; /* 0x0000080b06007986 */
/* 0x000fe8000c101904 */
/*0c40*/ LDG.E R8, [R4.64+0xc] ; /* 0x00000c0404087981 */
/* 0x000f28000c1e1900 */
/*0c50*/ LDG.E R13, [R2.64+0xc] ; /* 0x00000c04020d7981 */
/* 0x002f24000c1e1900 */
/*0c60*/ IMAD.IADD R13, R8, 0x1, R13 ; /* 0x00000001080d7824 */
/* 0x010fca00078e020d */
/*0c70*/ STG.E [R6.64+0xc], R13 ; /* 0x00000c0d06007986 */
/* 0x0001e8000c101904 */
/*0c80*/ LDG.E R8, [R4.64+0x10] ; /* 0x0000100404087981 */
/* 0x000f28000c1e1900 */
/*0c90*/ LDG.E R15, [R2.64+0x10] ; /* 0x00001004020f7981 */
/* 0x004f24000c1e1900 */
/*0ca0*/ IADD3 R15, R8, R15, RZ ; /* 0x0000000f080f7210 */
/* 0x010fca0007ffe0ff */
/*0cb0*/ STG.E [R6.64+0x10], R15 ; /* 0x0000100f06007986 */
/* 0x000fe8000c101904 */
/*0cc0*/ LDG.E R8, [R4.64+0x14] ; /* 0x0000140404087981 */
/* 0x0002a8000c1e1900 */
/*0cd0*/ LDG.E R17, [R2.64+0x14] ; /* 0x0000140402117981 */
/* 0x0086a2000c1e1900 */
/*0ce0*/ IADD3 R12, P1, R2, 0x20, RZ ; /* 0x00000020020c7810 */
/* 0x000fe40007f3e0ff */
/*0cf0*/ IADD3 R10, P2, R4, 0x20, RZ ; /* 0x00000020040a7810 */
/* 0x000fc40007f5e0ff */
/*0d00*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe20003f0e170 */
/*0d10*/ IMAD.X R13, RZ, RZ, R3, P1 ; /* 0x000000ffff0d7224 */
/* 0x001fe200008e0603 */
/*0d20*/ IADD3.X R5, RZ, R5, RZ, P2, !PT ; /* 0x00000005ff057210 */
/* 0x002fe200017fe4ff */
/*0d30*/ IMAD.MOV.U32 R4, RZ, RZ, R10 ; /* 0x000000ffff047224 */
/* 0x000fe200078e000a */
/*0d40*/ IADD3 R9, R9, 0x8, RZ ; /* 0x0000000809097810 */
/* 0x000fe20007ffe0ff */
/*0d50*/ IMAD.MOV.U32 R3, RZ, RZ, R13 ; /* 0x000000ffff037224 */
/* 0x008fe200078e000d */
/*0d60*/ MOV R2, R12 ; /* 0x0000000c00027202 */
/* 0x000fe20000000f00 */
/*0d70*/ IMAD.IADD R17, R8, 0x1, R17 ; /* 0x0000000108117824 */
/* 0x004fe200078e0211 */
/*0d80*/ IADD3 R8, P3, R6, 0x20, RZ ; /* 0x0000002006087810 */
/* 0x000fc80007f7e0ff */
/*0d90*/ STG.E [R6.64+0x14], R17 ; /* 0x0000141106007986 */
/* 0x0001e2000c101904 */
/*0da0*/ IMAD.X R11, RZ, RZ, R7, P3 ; /* 0x000000ffff0b7224 */
/* 0x000fe400018e0607 */
/*0db0*/ IMAD.MOV.U32 R6, RZ, RZ, R8 ; /* 0x000000ffff067224 */
/* 0x001fc600078e0008 */
/*0dc0*/ MOV R7, R11 ; /* 0x0000000b00077202 */
/* 0x000fe40000000f00 */
/*0dd0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0de0*/ ISETP.LT.OR P0, PT, R9, R0, P0 ; /* 0x000000000900720c */
/* 0x000fda0000701670 */
/*0df0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0e00*/ LDG.E R0, [R4.64+-0x8] ; /* 0xfffff80404007981 */
/* 0x000ea8000c1e1900 */
/*0e10*/ LDG.E R9, [R2.64+-0x8] ; /* 0xfffff80402097981 */
/* 0x000ea4000c1e1900 */
/*0e20*/ IMAD.IADD R9, R0, 0x1, R9 ; /* 0x0000000100097824 */
/* 0x004fca00078e0209 */
/*0e30*/ STG.E [R6.64+-0x8], R9 ; /* 0xfffff80906007986 */
/* 0x000fe8000c101904 */
/*0e40*/ LDG.E R0, [R4.64+-0x4] ; /* 0xfffffc0404007981 */
/* 0x000ea8000c1e1900 */
/*0e50*/ LDG.E R11, [R2.64+-0x4] ; /* 0xfffffc04020b7981 */
/* 0x000ea4000c1e1900 */
/*0e60*/ IMAD.IADD R11, R0, 0x1, R11 ; /* 0x00000001000b7824 */
/* 0x004fca00078e020b */
/*0e70*/ STG.E [R6.64+-0x4], R11 ; /* 0xfffffc0b06007986 */
/* 0x000fe8000c101904 */
/*0e80*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x000ea8000c1e1900 */
/*0e90*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */
/* 0x000ea4000c1e1900 */
/*0ea0*/ IADD3 R13, R0, R13, RZ ; /* 0x0000000d000d7210 */
/* 0x004fca0007ffe0ff */
/*0eb0*/ STG.E [R6.64], R13 ; /* 0x0000000d06007986 */
/* 0x000fe8000c101904 */
/*0ec0*/ LDG.E R0, [R4.64+0x4] ; /* 0x0000040404007981 */
/* 0x000ea8000c1e1900 */
/*0ed0*/ LDG.E R15, [R2.64+0x4] ; /* 0x00000404020f7981 */
/* 0x000ea4000c1e1900 */
/*0ee0*/ IMAD.IADD R15, R0, 0x1, R15 ; /* 0x00000001000f7824 */
/* 0x004fca00078e020f */
/*0ef0*/ STG.E [R6.64+0x4], R15 ; /* 0x0000040f06007986 */
/* 0x000fe2000c101904 */
/*0f00*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0f10*/ BRA 0xf10; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0f20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0f90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fa0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0fe0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ff0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13good_additionPiS_S_i
.globl _Z13good_additionPiS_S_i
.p2align 8
.type _Z13good_additionPiS_S_i,@function
_Z13good_additionPiS_S_i:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x20
s_load_b32 s6, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_ashr_i32 s9, s6, 31
s_mul_i32 s3, s3, s2
s_add_i32 s10, s6, s9
s_ashr_i32 s4, s3, 31
s_xor_b32 s10, s10, s9
s_add_i32 s5, s3, s4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
s_xor_b32 s5, s5, s4
s_xor_b32 s4, s9, s4
v_cvt_f32_u32_e32 v1, s5
s_sub_i32 s8, 0, s5
v_rcp_iflag_f32_e32 v1, v1
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v1, v1
v_readfirstlane_b32 s7, v1
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s8, s8, s7
s_mul_hi_u32 s8, s7, s8
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s7, s7, s8
s_mul_hi_u32 s7, s10, s7
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_mul_i32 s8, s7, s5
s_add_i32 s9, s7, 1
s_sub_i32 s8, s10, s8
s_sub_i32 s10, s8, s5
s_cmp_ge_u32 s8, s5
s_cselect_b32 s2, s9, s7
s_cselect_b32 s7, s10, s8
s_add_i32 s8, s2, 1
s_cmp_ge_u32 s7, s5
s_cselect_b32 s2, s8, s2
s_add_i32 s3, s3, -1
s_xor_b32 s2, s2, s4
v_cmp_eq_u32_e32 vcc_lo, s3, v1
s_sub_i32 s2, s2, s4
s_mov_b32 s3, exec_lo
v_mul_lo_u32 v0, v1, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, s2, v0
s_mov_b32 s2, 0
v_cndmask_b32_e64 v7, v2, s6, vcc_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_lt_i32_e64 v0, v7
s_cbranch_execz .LBB0_3
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[5:6], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s4, v5
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v6, vcc_lo
v_add_co_u32 v3, vcc_lo, s6, v5
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v6, vcc_lo
v_add_co_u32 v5, vcc_lo, s0, v5
v_add_co_ci_u32_e32 v6, vcc_lo, s1, v6, vcc_lo
.p2align 6
.LBB0_2:
global_load_b32 v8, v[1:2], off
global_load_b32 v9, v[3:4], off
v_add_co_u32 v1, vcc_lo, v1, 4
v_add_nc_u32_e32 v0, 1, v0
v_add_co_ci_u32_e32 v2, vcc_lo, 0, v2, vcc_lo
v_add_co_u32 v3, vcc_lo, v3, 4
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_cmp_ge_i32_e32 vcc_lo, v0, v7
s_or_b32 s2, vcc_lo, s2
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v8, v9, v8
global_store_b32 v[5:6], v8, off
v_add_co_u32 v5, s0, v5, 4
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v6, s0, 0, v6, s0
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13good_additionPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13good_additionPiS_S_i, .Lfunc_end0-_Z13good_additionPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13good_additionPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13good_additionPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0006a0f4_00000000-6_good_addition.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z38__device_stub__Z13good_additionPiS_S_iPiS_S_i
.type _Z38__device_stub__Z13good_additionPiS_S_iPiS_S_i, @function
_Z38__device_stub__Z13good_additionPiS_S_iPiS_S_i:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13good_additionPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z38__device_stub__Z13good_additionPiS_S_iPiS_S_i, .-_Z38__device_stub__Z13good_additionPiS_S_iPiS_S_i
.globl _Z13good_additionPiS_S_i
.type _Z13good_additionPiS_S_i, @function
_Z13good_additionPiS_S_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z38__device_stub__Z13good_additionPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z13good_additionPiS_S_i, .-_Z13good_additionPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13good_additionPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13good_additionPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "good_addition.hip"
.globl _Z28__device_stub__good_additionPiS_S_i # -- Begin function _Z28__device_stub__good_additionPiS_S_i
.p2align 4, 0x90
.type _Z28__device_stub__good_additionPiS_S_i,@function
_Z28__device_stub__good_additionPiS_S_i: # @_Z28__device_stub__good_additionPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13good_additionPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z28__device_stub__good_additionPiS_S_i, .Lfunc_end0-_Z28__device_stub__good_additionPiS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13good_additionPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13good_additionPiS_S_i,@object # @_Z13good_additionPiS_S_i
.section .rodata,"a",@progbits
.globl _Z13good_additionPiS_S_i
.p2align 3, 0x0
_Z13good_additionPiS_S_i:
.quad _Z28__device_stub__good_additionPiS_S_i
.size _Z13good_additionPiS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13good_additionPiS_S_i"
.size .L__unnamed_1, 25
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__good_additionPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13good_additionPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //////////////////////////////////////////////////////////////////////////
////This is the code implementation for Hanon finger exercise -- memory
////Dartmouth COSC89.25/189.03, GPU Programming and High-Performance Computing
//////////////////////////////////////////////////////////////////////////
#include <cstdio>
#include <vector>
#include <iostream>
#include <fstream>
using namespace std;
namespace name
{
std::string team = "Slim_Shaders";
std::string author_1 = "Andrw_Yang";
std::string author_2 = "Matthew_Kenney";
};
ofstream out;
//////////////////////////////////////////////////////////////////////////
////Hanon finger exercise for memory manipulations
////In this exercise you will practice the use of a set of CUDA memory APIs,
//// including cudaMalloc, cudaFree, cudaMemcpy, cudaMemcpyFrom(To)Symbol, and cudaGetSymbolAddress
const int a_host[8] = {1, 2, 3, 4, 5, 6, 7, 8}; ////a_host is an array on host
__device__ const int b_dev[8] = {101, 102, 103, 104, 105, 106, 107, 108}; ////b_dev is an array on device
////Hanon Exercise 12: practice cudaMalloc, cudaMemcpy, and cudaFree
////Expected output: copy a_host from host to device, add each of its elements by 1, store the results in result_host
////Hint:
////0) allocate an array on device with the same size as a_host;
////1) copy a_host from host to device;
////2) write a kernel function to carry out the incremental operation on device;
////3) copy the calculated results on device to result_host (on host)
////4) free the array on device
/*TODO: Your kernel function starts*/
__global__ void Hanon_kernel(int* to_increment)
{
int array_id = blockDim.x * blockIdx.x + threadIdx.x;
to_increment[array_id] = to_increment[array_id] + 1;
}
/*TODO: Your kernel function ends*/
__host__ void Hanon_Exercise_12()
{
int result_host[8] = {0};
int *a_dev = 0;
/*TODO: Your implementation starts*/
cudaMalloc((void**)&a_dev, 8 * sizeof(int));
cudaMemcpy(a_dev, a_host, 8 * sizeof(int), cudaMemcpyHostToDevice);
Hanon_kernel <<< 1, 8, 8 * sizeof(int)>>>(a_dev);
cudaMemcpy(result_host, a_dev, 8 * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(a_dev);
/*TODO: Your implementation ends*/
cout << "Hanon exercise 12:\n";
for(int i = 0; i < 8; i++)cout << result_host[i] << ", ";
cout << endl;
out << "Hanon exercise 12:\n";
for(int i = 0; i < 8; i++)out << result_host[i] << ", ";
out << endl;
}
////Hanon Exercise 13: practice cudaMemcpyFromSymbol
////Expected output: result_host={101,102,103,104,105,106,107,108}
////Process: copy b_dev (the static CUDA device array declared in line 35) to result_host by using cudaMemcpyFromSymbol.
////Hint: b_dev is in static (stack) memory, so you cannot use cudaMemcpy to manipulate it!
__host__ void Hanon_Exercise_13()
{
vector<int> result_host(8, 0);
/*TODO: Your implementation starts*/
cudaMemcpyFromSymbol((void*)&result_host[0], b_dev, 8 * sizeof(int));
/*TODO: Your implementation ends*/
cout << "Hanon exercise 13:\n";
for(int i = 0; i < 8; i++)cout << result_host[i] << ", ";
cout << endl;
out << "Hanon exercise 13:\n";
for(int i = 0; i < 8; i++)out << result_host[i] << ", ";
out << endl;
}
////Hanon Exercise 14: practice manipulating dynamic and static memories together
////Expected output: result_host={101+1,102+2,103+3,104+4,105+5,106+6,107+7,108+8}
////Process: calculate a_host+b_dev (element-wise sum) on device and store the results in result_host
////Hint:
////1) transferring a_host from host to device;
////2) write a kernel function to carry out the element-wise sum for arrays a_host and b_dev
////3) transfer the results from device to result_host (on host)
/*TODO: Your kernel function starts*/
__global__ void Hanon_kernel_14(int* to_increment)
{
int array_id = blockDim.x * blockIdx.x + threadIdx.x;
to_increment[array_id] = to_increment[array_id] + b_dev[array_id];
}
/*TODO: Your kernel function ends*/
__host__ void Hanon_Exercise_14()
{
int result_host[8] = {0};
int *a_dev = 0;
/*TODO: Your implementation starts*/
cudaMalloc((void**)&a_dev, 8 * sizeof(int));
cudaMemcpy(a_dev, a_host, 8 * sizeof(int), cudaMemcpyHostToDevice);
Hanon_kernel_14 <<< 1, 8, 8 * sizeof(int)>>>(a_dev);
cudaMemcpy(result_host, a_dev, 8 * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(a_dev);
/*TODO: Your host function implementation ends*/
cout << "Hanon exercise 14:\n";
for(int i = 0; i < 8; i++)cout << result_host[i] << ", ";
cout << endl;
out << "Hanon exercise 14:\n";
for(int i = 0; i < 8; i++)out << result_host[i] << ", ";
out << endl;
}
////Hanon Exercise 15: practice using shared memory
////Expected output: result_host={1*0+101,2*2+102,3*4+103,4*6+104,5*8+105,6*10+106,7*12+107,8*14+108}
////Process: calculate a_host*s+b_dev and store results in result_host. Here s is an array initialized in shared memory of the kernel function (line 111-113)
////Hint: You need to modify the arguments and the implementation of the function Calculate_Array_With_Shared() to pass in your array(s) and perform calculations
__global__ void Calculate_Array_With_Shared(int* array_from_host) /*TODO: modify the arguments of the kernel function*/
{
__shared__ int s[8];
s[threadIdx.x] = 2 * threadIdx.x;
__syncthreads();
/*TODO: Your kernel implementation starts*/
s[threadIdx.x] = s[threadIdx.x] * array_from_host[threadIdx.x] + b_dev[threadIdx.x];
__syncthreads();
array_from_host[threadIdx.x] = s[threadIdx.x];
/*TODO: Your kernel implementation ends*/
}
__host__ void Hanon_Exercise_15()
{
/*TODO: Your host function implementation starts*/
int result_host[8] = {0};
int *a_dev = 0;
cudaMalloc((void**)&a_dev, 8 * sizeof(int));
cudaMemcpy(a_dev, a_host, 8 * sizeof(int), cudaMemcpyHostToDevice);
Calculate_Array_With_Shared <<< 1, 8, 8 * sizeof(int)>>>(a_dev);
cudaMemcpy(result_host, a_dev, 8 * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(a_dev);
/*TODO: Your host function implementation ends*/
cout << "Hanon exercise 15:\n";
for(int i = 0; i < 8; i++)cout << result_host[i] << ", ";
cout << endl;
out << "Hanon exercise 15:\n";
for(int i = 0; i < 8; i++)out << result_host[i] << ", ";
out << endl;
}
////Hanon Exercise 16: practice cudaGetSymbolAddress
////Expected output: result_host={101*16+1,102*16+1,103*16+1,...,108*16+1}
////Process: apply the following kernel function Manipulate_Array() onto b_dev and store the results in result_host
////*WITHOUT* modifying the implementation in Manipulate_Array() (call it as a blackbox)
////Hint: b_dev is a static array on GPU, you need to get its dynamic pointer by calling cudaGetSymbolAddress, and then send this pointer into the kernel function to update its values
////Note: You are not allowed to modify the implementation in this function!
__global__ void Manipulate_Array(int* array)
{
array[threadIdx.x] *= 16;
array[threadIdx.x] += 1;
}
__host__ void Hanon_Exercise_16()
{
int result_host[8] = {0};
/*TODO: Your host function implementation starts*/
void* devPtr = 0;
cudaGetSymbolAddress(&devPtr, b_dev); // get address of b_dev constant
Manipulate_Array <<< 1, 8, 8 * sizeof(int)>>>((int*)devPtr); // Call Kernel function
cudaMemcpyFromSymbol(result_host, b_dev, sizeof(int) * 8); // Load the result into result_host
/*TODO: Your host function implementation ends*/
cout << "Hanon exercise 16:\n";
for(int i = 0; i < 8; i++)cout << result_host[i] << ", ";
cout << endl;
out << "Hanon exercise 16:\n";
for(int i = 0; i < 8; i++)out << result_host[i] << ", ";
out << endl;
}
////Hanon Exercise 17: practice using shared memory with multiple array types
////Expected output: array_int={208,206,204,202}, array_float={8.,6.,4.,2.},
//// i.e., reverse the order of the int array, multiply each element by 2, and copy its values to the float array (by type conversion),
//// and reverse the order of the float array, multiply each element by 2, and copy its values to the int array (by type conversion)
//// You need to implement this process by using a piece of shared memory holding both two arrays
////Hint: read the sample code we went through in class on Thursday, and mimic its steps as
////1. Initialize two array pointers with the types of int and float to different addresses of the shared memory
////2. Copy the values from array_int and array_float to the proper elements in shared memory
////3. synchronize threads
////4. Copy the values with the proper order and rescaling factor from each array in shared memory to global memory (array_int and array_float)
__global__ void Reverse_And_Multiply_Two_Arrays_With_Extern_Shared(int* array_int, const size_t array_int_size, float* array_float, const size_t array_float_size)
{
extern __shared__ int shared_mem[];
int* ai = (int*)&shared_mem[0];
float* af = (float*)&shared_mem[array_int_size];
/*Your implementation*/
__syncthreads();
// swap the types as we pass into shared memory
ai[threadIdx.x] = (int) array_float[array_float_size - 1 - threadIdx.x] * 2;
af[threadIdx.x] = (float) array_int[array_int_size - 1 - threadIdx.x] * 2.0;
__syncthreads();
// Copy manipulated values back to global memory
array_int[threadIdx.x] = ai[threadIdx.x];
array_float[threadIdx.x] = af[threadIdx.x];
}
__host__ void Hanon_Exercise_17()
{
int array_int_host[4] = {1, 2, 3, 4};
float array_float_host[4] = {101., 102., 103., 104.};
int* array_int_dev = 0;
float* array_float_dev = 0;
cudaMalloc((void**)&array_int_dev, 4 * sizeof(int));
cudaMalloc((void**)&array_float_dev, 4 * sizeof(float));
cudaMemcpy(array_int_dev, array_int_host, 4 * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(array_float_dev, array_float_host, 4 * sizeof(float), cudaMemcpyHostToDevice);
/*Your implementation: comment back the following code with the correct specification for shared memory size (by replacing the * with a proper number) */
Reverse_And_Multiply_Two_Arrays_With_Extern_Shared <<< 1, 4, 4 * sizeof(int) +4 * sizeof(float) >>> (array_int_dev, 4, array_float_dev, 4);
// Copy results back to host memory
cudaMemcpy(array_int_host, array_int_dev, 4 * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(array_float_host, array_float_dev, 4 * sizeof(float), cudaMemcpyDeviceToHost);
cout << "Hanon exercise 17:\n";
for(int i = 0; i < 4; i++)cout << array_int_host[i] << ", ";
cout << endl;
for(int i = 0; i < 4; i++)cout << array_float_host[i] << ", ";
cout << endl;
out << "Hanon exercise 17:\n";
for(int i = 0; i < 4; i++)out << array_int_host[i] << ", ";
out << endl;
for(int i = 0; i < 4; i++)out << array_float_host[i] << ", ";
out << endl;
}
////Congratulations! You have finished all your Hanon exercises today!
//////////////////////////////////////////////////////////////////////////
void Hanon_Exercise_Test_Memory()
{
Hanon_Exercise_12();
Hanon_Exercise_13();
Hanon_Exercise_14();
Hanon_Exercise_15();
Hanon_Exercise_16();
Hanon_Exercise_17();
}
////Entry point: verifies the team name has been customized, opens the result
////file, then runs the full memory exercise suite.
int main()
{
    // Refuse to run with the placeholder team name.
    if (name::team == "Team_X") {
        printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n");
        return 0;
    }
    // Results are recorded to "<team>_exercise_memory.dat" via the global `out` stream.
    const std::string result_file = name::team + "_exercise_memory.dat";
    out.open(result_file);
    if (out.fail()) {
        printf("\ncannot open file %s to record results\n", result_file.c_str());
        return 0;
    }
    Hanon_Exercise_Test_Memory();
    return 0;
}
Function : _Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R13, SR_TID.X ; /* 0x00000000000d7919 */
/* 0x000e220000002100 */
/*0020*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff067624 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff057624 */
/* 0x000fe200078e00ff */
/*0050*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0060*/ LOP3.LUT R0, RZ, R13, RZ, 0x33, !PT ; /* 0x0000000dff007212 */
/* 0x001fc800078e33ff */
/*0070*/ IADD3 R3, P0, R0.reuse, c[0x0][0x178], RZ ; /* 0x00005e0000037a10 */
/* 0x040fe40007f1e0ff */
/*0080*/ IADD3 R0, P1, R0, c[0x0][0x168], RZ ; /* 0x00005a0000007a10 */
/* 0x000fe40007f3e0ff */
/*0090*/ IADD3.X R6, R6, -0x1, RZ, P0, !PT ; /* 0xffffffff06067810 */
/* 0x000fe400007fe4ff */
/*00a0*/ IADD3.X R5, R5, -0x1, RZ, P1, !PT ; /* 0xffffffff05057810 */
/* 0x000fe40000ffe4ff */
/*00b0*/ LEA R2, P0, R3, c[0x0][0x170], 0x2 ; /* 0x00005c0003027a11 */
/* 0x000fe400078010ff */
/*00c0*/ LEA R4, P1, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000047a11 */
/* 0x000fc400078210ff */
/*00d0*/ LEA.HI.X R3, R3, c[0x0][0x174], R6, 0x2, P0 ; /* 0x00005d0003037a11 */
/* 0x000fe400000f1406 */
/*00e0*/ LEA.HI.X R5, R0, c[0x0][0x164], R5, 0x2, P1 ; /* 0x0000590000057a11 */
/* 0x000fc600008f1405 */
/*00f0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ee2000c1e1900 */
/*0110*/ SHF.L.U32 R6, R13, 0x2, RZ ; /* 0x000000020d067819 */
/* 0x000fe200000006ff */
/*0120*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff097624 */
/* 0x000fc800078e00ff */
/*0130*/ IMAD R6, R9, 0x4, R6 ; /* 0x0000000409067824 */
/* 0x000fe200078e0206 */
/*0140*/ F2I.TRUNC.NTZ R0, R2 ; /* 0x0000000200007305 */
/* 0x004e30000020f100 */
/*0150*/ I2F R7, R4 ; /* 0x0000000400077306 */
/* 0x0082a20000201400 */
/*0160*/ IMAD.SHL.U32 R0, R0, 0x2, RZ ; /* 0x0000000200007824 */
/* 0x001fe200078e00ff */
/*0170*/ HFMA2.MMA R4, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff047435 */
/* 0x002fc800000001ff */
/*0180*/ STS [R13.X4], R0 ; /* 0x000000000d007388 */
/* 0x000fe20000004800 */
/*0190*/ FADD R7, R7, R7 ; /* 0x0000000707077221 */
/* 0x004fca0000000000 */
/*01a0*/ IMAD.WIDE.U32 R2, R13.reuse, R4.reuse, c[0x0][0x160] ; /* 0x000058000d027625 */
/* 0x0c0fe200078e0004 */
/*01b0*/ STS [R6], R7 ; /* 0x0000000706007388 */
/* 0x000fe60000000800 */
/*01c0*/ IMAD.WIDE.U32 R4, R13, R4, c[0x0][0x170] ; /* 0x00005c000d047625 */
/* 0x000fe200078e0004 */
/*01d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*01e0*/ LDS R9, [R13.X4] ; /* 0x000000000d097984 */
/* 0x000e280000004800 */
/*01f0*/ LDS R11, [R6] ; /* 0x00000000060b7984 */
/* 0x000e680000000800 */
/*0200*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x001fe8000c101904 */
/*0210*/ STG.E [R4.64], R11 ; /* 0x0000000b04007986 */
/* 0x002fe2000c101904 */
/*0220*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0230*/ BRA 0x230; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z16Manipulate_ArrayPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x001fca00078e0003 */
/*0050*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*0060*/ LEA R5, R0, 0x1, 0x4 ; /* 0x0000000100057811 */
/* 0x004fca00078e20ff */
/*0070*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0080*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0090*/ BRA 0x90; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z27Calculate_Array_With_SharedPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ SHF.L.U32 R0, R9, 0x1, RZ ; /* 0x0000000109007819 */
/* 0x001fd000000006ff */
/*0050*/ IMAD.WIDE.U32 R4, R9.reuse, R2.reuse, c[0x4][0x0] ; /* 0x0100000009047625 */
/* 0x0c0fe200078e0002 */
/*0060*/ STS [R9.X4], R0 ; /* 0x0000000009007388 */
/* 0x000fe60000004800 */
/*0070*/ IMAD.WIDE.U32 R2, R9, R2, c[0x0][0x160] ; /* 0x0000580009027625 */
/* 0x000fe200078e0002 */
/*0080*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0090*/ LDG.E.CONSTANT R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e9900 */
/*00a0*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDS R6, [R9.X4] ; /* 0x0000000009067984 */
/* 0x000ea40000004800 */
/*00c0*/ IMAD R6, R6, R7, R4 ; /* 0x0000000706067224 */
/* 0x004fca00078e0204 */
/*00d0*/ STS [R9.X4], R6 ; /* 0x0000000609007388 */
/* 0x000fe80000004800 */
/*00e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*00f0*/ LDS R7, [R9.X4] ; /* 0x0000000009077984 */
/* 0x000e280000004800 */
/*0100*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x001fe2000c101904 */
/*0110*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0120*/ BRA 0x120; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z15Hanon_kernel_14Pi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R4, R2, R7, c[0x4][0x0] ; /* 0x0100000002047625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R2, R2, R7, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fe400078e0207 */
/*0080*/ LDG.E.CONSTANT R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea8000c1e9900 */
/*0090*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ IADD3 R7, R0, R5, RZ ; /* 0x0000000500077210 */
/* 0x004fca0007ffe0ff */
/*00b0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z12Hanon_kernelPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0205 */
/*0070*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*0080*/ IADD3 R5, R0, 0x1, RZ ; /* 0x0000000100057810 */
/* 0x004fca0007ffe0ff */
/*0090*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //////////////////////////////////////////////////////////////////////////
////This is the code implementation for Hanon finger exercise -- memory
////Dartmouth COSC89.25/189.03, GPU Programming and High-Performance Computing
//////////////////////////////////////////////////////////////////////////
#include <cstdio>
#include <vector>
#include <iostream>
#include <fstream>
using namespace std;
namespace name
{
std::string team = "Slim_Shaders";
std::string author_1 = "Andrw_Yang";
std::string author_2 = "Matthew_Kenney";
};
ofstream out;
//////////////////////////////////////////////////////////////////////////
////Hanon finger exercise for memory manipulations
////In this exercise you will practice the use of a set of CUDA memory APIs,
//// including cudaMalloc, cudaFree, cudaMemcpy, cudaMemcpyFrom(To)Symbol, and cudaGetSymbolAddress
const int a_host[8] = {1, 2, 3, 4, 5, 6, 7, 8}; ////a_host is an array on host
__device__ const int b_dev[8] = {101, 102, 103, 104, 105, 106, 107, 108}; ////b_dev is an array on device
////Hanon Exercise 12: practice cudaMalloc, cudaMemcpy, and cudaFree
////Expected output: copy a_host from host to device, add each of its elements by 1, store the results in result_host
////Hint:
////0) allocate an array on device with the same size as a_host;
////1) copy a_host from host to device;
////2) write a kernel function to carry out the incremental operation on device;
////3) copy the calculated results on device to result_host (on host)
////4) free the array on device
/*TODO: Your kernel function starts*/
////Exercise-12 kernel: each thread increments one element of the array by one.
////Launch contract: grid*block threads must equal the array length (no bounds
////guard -- the host launches exactly <<<1, 8>>> for an 8-element array).
__global__ void Hanon_kernel(int* to_increment)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    ++to_increment[idx];
}
/*TODO: Your kernel function ends*/
////Exercise 12: round-trip a_host through device memory, incrementing each
////element by one on the GPU, then print/record the result.
__host__ void Hanon_Exercise_12()
{
    int result_host[8] = {0};
    int *a_dev = 0;
    const size_t bytes = 8 * sizeof(int);
    cudaMalloc((void**)&a_dev, bytes);
    cudaMemcpy(a_dev, a_host, bytes, cudaMemcpyHostToDevice);
    // One block of 8 threads; the dynamic shared-memory launch argument is
    // kept from the original even though the kernel does not use it.
    Hanon_kernel <<< 1, 8, 8 * sizeof(int)>>>(a_dev);
    cudaMemcpy(result_host, a_dev, bytes, cudaMemcpyDeviceToHost);
    cudaFree(a_dev);
    cout << "Hanon exercise 12:\n";
    for (int i = 0; i < 8; ++i) cout << result_host[i] << ", ";
    cout << endl;
    out << "Hanon exercise 12:\n";
    for (int i = 0; i < 8; ++i) out << result_host[i] << ", ";
    out << endl;
}
////Hanon Exercise 13: practice cudaMemcpyFromSymbol
////Expected output: result_host={101,102,103,104,105,106,107,108}
////Process: copy b_dev (the static CUDA device array declared in line 35) to result_host by using cudaMemcpyFromSymbol.
////Hint: b_dev is in static (stack) memory, so you cannot use cudaMemcpy to manipulate it!
////Exercise 13: read the static __device__ array b_dev into host memory via
////cudaMemcpyFromSymbol (a plain cudaMemcpy cannot address a device symbol).
__host__ void Hanon_Exercise_13()
{
    vector<int> result_host(8, 0);
    cudaMemcpyFromSymbol(result_host.data(), b_dev, 8 * sizeof(int));
    cout << "Hanon exercise 13:\n";
    for (int i = 0; i < 8; ++i) cout << result_host[i] << ", ";
    cout << endl;
    out << "Hanon exercise 13:\n";
    for (int i = 0; i < 8; ++i) out << result_host[i] << ", ";
    out << endl;
}
////Hanon Exercise 14: practice manipulating dynamic and static memories together
////Expected output: result_host={101+1,102+2,103+3,104+4,105+5,106+6,107+7,108+8}
////Process: calculate a_host+b_dev (element-wise sum) on device and store the results in result_host
////Hint:
////1) transferring a_host from host to device;
////2) write a kernel function to carry out the element-wise sum for arrays a_host and b_dev
////3) transfer the results from device to result_host (on host)
/*TODO: Your kernel function starts*/
////Exercise-14 kernel: element-wise sum of the caller's array and the static
////device array b_dev, written back in place. No bounds guard -- the host
////launches exactly one thread per element.
__global__ void Hanon_kernel_14(int* to_increment)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    to_increment[idx] += b_dev[idx];
}
/*TODO: Your kernel function ends*/
////Exercise 14: upload a_host, add the static device array b_dev to it
////element-wise on the GPU, and bring the sums back into result_host.
__host__ void Hanon_Exercise_14()
{
    int result_host[8] = {0};
    int *a_dev = 0;
    const size_t bytes = 8 * sizeof(int);
    cudaMalloc((void**)&a_dev, bytes);
    cudaMemcpy(a_dev, a_host, bytes, cudaMemcpyHostToDevice);
    // One thread per element; shared-memory launch argument kept from the
    // original even though the kernel does not use it.
    Hanon_kernel_14 <<< 1, 8, 8 * sizeof(int)>>>(a_dev);
    cudaMemcpy(result_host, a_dev, bytes, cudaMemcpyDeviceToHost);
    cudaFree(a_dev);
    cout << "Hanon exercise 14:\n";
    for (int i = 0; i < 8; ++i) cout << result_host[i] << ", ";
    cout << endl;
    out << "Hanon exercise 14:\n";
    for (int i = 0; i < 8; ++i) out << result_host[i] << ", ";
    out << endl;
}
////Hanon Exercise 15: practice using shared memory
////Expected output: result_host={1*0+101,2*2+102,3*4+103,4*6+104,5*8+105,6*10+106,7*12+107,8*14+108}
////Process: calculate a_host*s+b_dev and store results in result_host. Here s is an array initialized in shared memory of the kernel function (line 111-113)
////Hint: You need to modify the arguments and the implementation of the function Calculate_Array_With_Shared() to pass in your array(s) and perform calculations
////Exercise-15 kernel: computes array_from_host[i] * (2*i) + b_dev[i] using a
////block-shared staging array, then writes the result back to global memory.
////Launch contract: blockDim.x <= 8 (size of the static shared array).
__global__ void Calculate_Array_With_Shared(int* array_from_host) /*TODO: modify the arguments of the kernel function*/
{
    __shared__ int s[8];
    const unsigned int tid = threadIdx.x;
    s[tid] = 2 * tid;
    __syncthreads(); // shared array fully initialized before the compute phase
    s[tid] = s[tid] * array_from_host[tid] + b_dev[tid];
    __syncthreads(); // all results staged before the write-back phase
    array_from_host[tid] = s[tid];
}
////Exercise 15: upload a_host, let Calculate_Array_With_Shared combine it with
////a shared-memory array and b_dev on the GPU, and fetch the result back.
__host__ void Hanon_Exercise_15()
{
    int result_host[8] = {0};
    int *a_dev = 0;
    const size_t bytes = 8 * sizeof(int);
    cudaMalloc((void**)&a_dev, bytes);
    cudaMemcpy(a_dev, a_host, bytes, cudaMemcpyHostToDevice);
    // The kernel's shared array is statically sized; the dynamic shared-memory
    // launch argument is kept from the original although it goes unused.
    Calculate_Array_With_Shared <<< 1, 8, 8 * sizeof(int)>>>(a_dev);
    cudaMemcpy(result_host, a_dev, bytes, cudaMemcpyDeviceToHost);
    cudaFree(a_dev);
    cout << "Hanon exercise 15:\n";
    for (int i = 0; i < 8; ++i) cout << result_host[i] << ", ";
    cout << endl;
    out << "Hanon exercise 15:\n";
    for (int i = 0; i < 8; ++i) out << result_host[i] << ", ";
    out << endl;
}
////Hanon Exercise 16: practice cudaGetSymbolAddress
////Expected output: result_host={101*16+1,102*16+1,103*16+1,...,108*16+1}
////Process: apply the following kernel function Manipulate_Array() onto b_dev and store the results in result_host
////*WITHOUT* modifying the implementation in Manipulate_Array() (call it as a blackbox)
////Hint: b_dev is a static array on GPU, you need to get its dynamic pointer by calling cudaGetSymbolAddress, and then send this pointer into the kernel function to update its values
////Note: You are not allowed to modify the implementation in this function!
////Blackbox kernel for Exercise 16: per-thread transform array[i] = array[i]*16 + 1.
////One thread per element, no bounds guard. The exercise statement forbids
////modifying this implementation, so only documentation is added here.
__global__ void Manipulate_Array(int* array)
{
array[threadIdx.x] *= 16;
array[threadIdx.x] += 1;
}
////Exercise 16: obtain a runtime pointer to the static device array b_dev via
////cudaGetSymbolAddress, run Manipulate_Array on it, and read the result back.
__host__ void Hanon_Exercise_16()
{
    int result_host[8] = {0};
    // A __device__ symbol cannot be handed to a kernel directly from host
    // code; resolve its device address first.
    void* b_dev_addr = 0;
    cudaGetSymbolAddress(&b_dev_addr, b_dev);
    Manipulate_Array <<< 1, 8, 8 * sizeof(int)>>>(static_cast<int*>(b_dev_addr));
    // Load the manipulated values into result_host.
    cudaMemcpyFromSymbol(result_host, b_dev, sizeof(int) * 8);
    cout << "Hanon exercise 16:\n";
    for (int i = 0; i < 8; ++i) cout << result_host[i] << ", ";
    cout << endl;
    out << "Hanon exercise 16:\n";
    for (int i = 0; i < 8; ++i) out << result_host[i] << ", ";
    out << endl;
}
////Hanon Exercise 17: practice using shared memory with multiple array types
////Expected output: array_int={208,206,204,202}, array_float={8.,6.,4.,2.},
//// i.e., reverse the order of the int array, multiply each element by 2, and copy its values to the float array (by type conversion),
//// and reverse the order of the float array, multiply each element by 2, and copy its values to the int array (by type conversion)
//// You need to implement this process by using a piece of shared memory holding both two arrays
////Hint: read the sample code we went through in class on Thursday, and mimic its steps as
////1. Initialize two array pointers with the types of int and float to different addresses of the shared memory
////2. Copy the values from array_int and array_float to the proper elements in shared memory
////3. synchronize threads
////4. Copy the values with the proper order and rescaling factor from each array in shared memory to global memory (array_int and array_float)
////Exercise-17 kernel: one extern-shared buffer holds an int section (first
////array_int_size words) followed by a float section. Each thread stages the
////reversed, doubled value of the *other* array's element into shared memory,
////then copies its own slot back to global memory.
////Launch contract: blockDim.x must equal both array sizes, and dynamic shared
////memory must cover array_int_size ints + array_float_size floats.
__global__ void Reverse_And_Multiply_Two_Arrays_With_Extern_Shared(int* array_int, const size_t array_int_size, float* array_float, const size_t array_float_size)
{
extern __shared__ int shared_mem[];
int* ai = (int*)&shared_mem[0];
// float section starts right after the int section of the shared buffer
float* af = (float*)&shared_mem[array_int_size];
/*Your implementation*/
__syncthreads();
// swap the types as we pass into shared memory
// NOTE(review): the (int) cast binds tighter than '*', so the float is
// truncated to int *before* doubling; same result here because the inputs
// are whole numbers -- confirm that order is intended.
ai[threadIdx.x] = (int) array_float[array_float_size - 1 - threadIdx.x] * 2;
// NOTE(review): 2.0 is a double literal, so this multiply happens in double
// precision before narrowing back to float.
af[threadIdx.x] = (float) array_int[array_int_size - 1 - threadIdx.x] * 2.0;
__syncthreads();
// Copy manipulated values back to global memory
array_int[threadIdx.x] = ai[threadIdx.x];
array_float[threadIdx.x] = af[threadIdx.x];
}
////Host driver for Hanon Exercise 17: reverse and double two arrays of
////different types through one extern-shared device buffer.
////Fix: the original leaked both device allocations on every call; they are
////now released with cudaFree after results are copied back.
__host__ void Hanon_Exercise_17()
{
    int array_int_host[4] = {1, 2, 3, 4};
    float array_float_host[4] = {101., 102., 103., 104.};
    int* array_int_dev = 0;
    float* array_float_dev = 0;
    cudaMalloc((void**)&array_int_dev, 4 * sizeof(int));
    cudaMalloc((void**)&array_float_dev, 4 * sizeof(float));
    cudaMemcpy(array_int_dev, array_int_host, 4 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(array_float_dev, array_float_host, 4 * sizeof(float), cudaMemcpyHostToDevice);
    ////Dynamic shared memory must hold both arrays: 4 ints followed by 4 floats.
    Reverse_And_Multiply_Two_Arrays_With_Extern_Shared <<< 1, 4, 4 * sizeof(int) + 4 * sizeof(float) >>> (array_int_dev, 4, array_float_dev, 4);
    // Copy results back to host memory (blocking cudaMemcpy also waits for the kernel)
    cudaMemcpy(array_int_host, array_int_dev, 4 * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(array_float_host, array_float_dev, 4 * sizeof(float), cudaMemcpyDeviceToHost);
    ////Release device memory (missing in the original -- leaked on every call).
    cudaFree(array_int_dev);
    cudaFree(array_float_dev);
    cout << "Hanon exercise 17:\n";
    for(int i = 0; i < 4; i++)cout << array_int_host[i] << ", ";
    cout << endl;
    for(int i = 0; i < 4; i++)cout << array_float_host[i] << ", ";
    cout << endl;
    out << "Hanon exercise 17:\n";
    for(int i = 0; i < 4; i++)out << array_int_host[i] << ", ";
    out << endl;
    for(int i = 0; i < 4; i++)out << array_float_host[i] << ", ";
    out << endl;
}
////Congratulations! You have finished all your Hanon exercises today!
//////////////////////////////////////////////////////////////////////////
////Runs every Hanon memory exercise in order (12..17); each exercise prints
////its results to stdout and appends them to the global `out` file stream,
////so the sequence below also fixes the order of the recorded output.
void Hanon_Exercise_Test_Memory()
{
Hanon_Exercise_12();
Hanon_Exercise_13();
Hanon_Exercise_14();
Hanon_Exercise_15();
Hanon_Exercise_16();
Hanon_Exercise_17();
}
////Entry point: verifies the team name has been customized, opens the result
////file, then runs the full memory exercise suite.
int main()
{
    // Refuse to run with the placeholder team name.
    if (name::team == "Team_X") {
        printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n");
        return 0;
    }
    // Results are recorded to "<team>_exercise_memory.dat" via the global `out` stream.
    const std::string result_file = name::team + "_exercise_memory.dat";
    out.open(result_file);
    if (out.fail()) {
        printf("\ncannot open file %s to record results\n", result_file.c_str());
        return 0;
    }
    Hanon_Exercise_Test_Memory();
    return 0;
}
////This is the code implementation for Hanon finger exercise -- memory
////Dartmouth COSC89.25/189.03, GPU Programming and High-Performance Computing
//////////////////////////////////////////////////////////////////////////
#include <hip/hip_runtime.h>
#include <cstdio>
#include <vector>
#include <iostream>
#include <fstream>
using namespace std;
namespace name
{
std::string team = "Slim_Shaders";
std::string author_1 = "Andrw_Yang";
std::string author_2 = "Matthew_Kenney";
};
ofstream out;
//////////////////////////////////////////////////////////////////////////
////Hanon finger exercise for memory manipulations
////In this exercise you will practice the use of a set of CUDA memory APIs,
//// including cudaMalloc, cudaFree, cudaMemcpy, cudaMemcpyFrom(To)Symbol, and cudaGetSymbolAddress
const int a_host[8] = {1, 2, 3, 4, 5, 6, 7, 8}; ////a_host is an array on host
__device__ const int b_dev[8] = {101, 102, 103, 104, 105, 106, 107, 108}; ////b_dev is an array on device
////Hanon Exercise 12: practice cudaMalloc, cudaMemcpy, and cudaFree
////Expected output: copy a_host from host to device, add each of its elements by 1, store the results in result_host
////Hint:
////0) allocate an array on device with the same size as a_host;
////1) copy a_host from host to device;
////2) write a kernel function to carry out the incremental operation on device;
////3) copy the calculated results on device to result_host (on host)
////4) free the array on device
/*TODO: Your kernel function starts*/
////Exercise-12 kernel (HIP): each thread increments one element of the array
////by one. No bounds guard -- the host launches exactly one thread per element.
__global__ void Hanon_kernel(int* to_increment)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    ++to_increment[idx];
}
/*TODO: Your kernel function ends*/
////Exercise 12 (HIP): round-trip a_host through device memory, incrementing
////each element by one on the GPU, then print/record the result.
__host__ void Hanon_Exercise_12()
{
    int result_host[8] = {0};
    int *a_dev = 0;
    const size_t bytes = 8 * sizeof(int);
    hipMalloc((void**)&a_dev, bytes);
    hipMemcpy(a_dev, a_host, bytes, hipMemcpyHostToDevice);
    // One block of 8 threads; the dynamic shared-memory launch argument is
    // kept from the original even though the kernel does not use it.
    Hanon_kernel <<< 1, 8, 8 * sizeof(int)>>>(a_dev);
    hipMemcpy(result_host, a_dev, bytes, hipMemcpyDeviceToHost);
    hipFree(a_dev);
    cout << "Hanon exercise 12:\n";
    for (int i = 0; i < 8; ++i) cout << result_host[i] << ", ";
    cout << endl;
    out << "Hanon exercise 12:\n";
    for (int i = 0; i < 8; ++i) out << result_host[i] << ", ";
    out << endl;
}
////Hanon Exercise 13: practice cudaMemcpyFromSymbol
////Expected output: result_host={101,102,103,104,105,106,107,108}
////Process: copy b_dev (the static CUDA device array declared in line 35) to result_host by using cudaMemcpyFromSymbol.
////Hint: b_dev is in static (stack) memory, so you cannot use cudaMemcpy to manipulate it!
////Exercise 13 (HIP): read the static __device__ array b_dev into host memory
////via hipMemcpyFromSymbol; HIP_SYMBOL resolves the device symbol portably.
__host__ void Hanon_Exercise_13()
{
    vector<int> result_host(8, 0);
    hipMemcpyFromSymbol(result_host.data(), HIP_SYMBOL(b_dev), 8 * sizeof(int));
    cout << "Hanon exercise 13:\n";
    for (int i = 0; i < 8; ++i) cout << result_host[i] << ", ";
    cout << endl;
    out << "Hanon exercise 13:\n";
    for (int i = 0; i < 8; ++i) out << result_host[i] << ", ";
    out << endl;
}
////Hanon Exercise 14: practice manipulating dynamic and static memories together
////Expected output: result_host={101+1,102+2,103+3,104+4,105+5,106+6,107+7,108+8}
////Process: calculate a_host+b_dev (element-wise sum) on device and store the results in result_host
////Hint:
////1) transferring a_host from host to device;
////2) write a kernel function to carry out the element-wise sum for arrays a_host and b_dev
////3) transfer the results from device to result_host (on host)
/*TODO: Your kernel function starts*/
////Exercise-14 kernel (HIP): element-wise sum of the caller's array and the
////static device array b_dev, written back in place. One thread per element.
__global__ void Hanon_kernel_14(int* to_increment)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    to_increment[idx] += b_dev[idx];
}
/*TODO: Your kernel function ends*/
////Exercise 14 (HIP): upload a_host, add the static device array b_dev to it
////element-wise on the GPU, and bring the sums back into result_host.
__host__ void Hanon_Exercise_14()
{
    int result_host[8] = {0};
    int *a_dev = 0;
    const size_t bytes = 8 * sizeof(int);
    hipMalloc((void**)&a_dev, bytes);
    hipMemcpy(a_dev, a_host, bytes, hipMemcpyHostToDevice);
    // One thread per element; shared-memory launch argument kept from the
    // original even though the kernel does not use it.
    Hanon_kernel_14 <<< 1, 8, 8 * sizeof(int)>>>(a_dev);
    hipMemcpy(result_host, a_dev, bytes, hipMemcpyDeviceToHost);
    hipFree(a_dev);
    cout << "Hanon exercise 14:\n";
    for (int i = 0; i < 8; ++i) cout << result_host[i] << ", ";
    cout << endl;
    out << "Hanon exercise 14:\n";
    for (int i = 0; i < 8; ++i) out << result_host[i] << ", ";
    out << endl;
}
////Hanon Exercise 15: practice using shared memory
////Expected output: result_host={1*0+101,2*2+102,3*4+103,4*6+104,5*8+105,6*10+106,7*12+107,8*14+108}
////Process: calculate a_host*s+b_dev and store results in result_host. Here s is an array initialized in shared memory of the kernel function (line 111-113)
////Hint: You need to modify the arguments and the implementation of the function Calculate_Array_With_Shared() to pass in your array(s) and perform calculations
////Exercise-15 kernel (HIP): computes array_from_host[i] * (2*i) + b_dev[i]
////using a block-shared staging array, then writes the result back to global
////memory. Launch contract: blockDim.x <= 8 (size of the static shared array).
__global__ void Calculate_Array_With_Shared(int* array_from_host) /*TODO: modify the arguments of the kernel function*/
{
    __shared__ int s[8];
    const unsigned int tid = threadIdx.x;
    s[tid] = 2 * tid;
    __syncthreads(); // shared array fully initialized before the compute phase
    s[tid] = s[tid] * array_from_host[tid] + b_dev[tid];
    __syncthreads(); // all results staged before the write-back phase
    array_from_host[tid] = s[tid];
}
////Exercise 15 (HIP): upload a_host, let Calculate_Array_With_Shared combine
////it with a shared-memory array and b_dev on the GPU, and fetch the result.
__host__ void Hanon_Exercise_15()
{
    int result_host[8] = {0};
    int *a_dev = 0;
    const size_t bytes = 8 * sizeof(int);
    hipMalloc((void**)&a_dev, bytes);
    hipMemcpy(a_dev, a_host, bytes, hipMemcpyHostToDevice);
    // The kernel's shared array is statically sized; the dynamic shared-memory
    // launch argument is kept from the original although it goes unused.
    Calculate_Array_With_Shared <<< 1, 8, 8 * sizeof(int)>>>(a_dev);
    hipMemcpy(result_host, a_dev, bytes, hipMemcpyDeviceToHost);
    hipFree(a_dev);
    cout << "Hanon exercise 15:\n";
    for (int i = 0; i < 8; ++i) cout << result_host[i] << ", ";
    cout << endl;
    out << "Hanon exercise 15:\n";
    for (int i = 0; i < 8; ++i) out << result_host[i] << ", ";
    out << endl;
}
////Hanon Exercise 16: practice cudaGetSymbolAddress
////Expected output: result_host={101*16+1,102*16+1,103*16+1,...,108*16+1}
////Process: apply the following kernel function Manipulate_Array() onto b_dev and store the results in result_host
////*WITHOUT* modifying the implementation in Manipulate_Array() (call it as a blackbox)
////Hint: b_dev is a static array on GPU, you need to get its dynamic pointer by calling cudaGetSymbolAddress, and then send this pointer into the kernel function to update its values
////Note: You are not allowed to modify the implementation in this function!
////Blackbox kernel for Exercise 16: per-thread transform array[i] = array[i]*16 + 1.
////One thread per element, no bounds guard. The exercise statement forbids
////modifying this implementation, so only documentation is added here.
__global__ void Manipulate_Array(int* array)
{
array[threadIdx.x] *= 16;
array[threadIdx.x] += 1;
}
////Exercise 16 (HIP): obtain a runtime pointer to the static device array
////b_dev via hipGetSymbolAddress, run Manipulate_Array on it, and read back.
__host__ void Hanon_Exercise_16()
{
    int result_host[8] = {0};
    // A __device__ symbol cannot be handed to a kernel directly from host
    // code; resolve its device address first (HIP_SYMBOL for portability).
    void* b_dev_addr = 0;
    hipGetSymbolAddress(&b_dev_addr, HIP_SYMBOL(b_dev));
    Manipulate_Array <<< 1, 8, 8 * sizeof(int)>>>(static_cast<int*>(b_dev_addr));
    // Load the manipulated values into result_host.
    hipMemcpyFromSymbol(result_host, HIP_SYMBOL(b_dev), sizeof(int) * 8);
    cout << "Hanon exercise 16:\n";
    for (int i = 0; i < 8; ++i) cout << result_host[i] << ", ";
    cout << endl;
    out << "Hanon exercise 16:\n";
    for (int i = 0; i < 8; ++i) out << result_host[i] << ", ";
    out << endl;
}
////Hanon Exercise 17: practice using shared memory with multiple array types
////Expected output: array_int={208,206,204,202}, array_float={8.,6.,4.,2.},
//// i.e., reverse the order of the int array, multiply each element by 2, and copy its values to the float array (by type conversion),
//// and reverse the order of the float array, multiply each element by 2, and copy its values to the int array (by type conversion)
//// You need to implement this process by using a piece of shared memory holding both two arrays
////Hint: read the sample code we went through in class on Thursday, and mimic its steps as
////1. Initialize two array pointers with the types of int and float to different addresses of the shared memory
////2. Copy the values from array_int and array_float to the proper elements in shared memory
////3. synchronize threads
////4. Copy the values with the proper order and rescaling factor from each array in shared memory to global memory (array_int and array_float)
__global__ void Reverse_And_Multiply_Two_Arrays_With_Extern_Shared(int* array_int, const size_t array_int_size, float* array_float, const size_t array_float_size)
{
extern __shared__ int shared_mem[];
int* ai = (int*)&shared_mem[0];
float* af = (float*)&shared_mem[array_int_size];
/*Your implementation*/
__syncthreads();
// swap the types as we pass into shared memory
ai[threadIdx.x] = (int) array_float[array_float_size - 1 - threadIdx.x] * 2;
af[threadIdx.x] = (float) array_int[array_int_size - 1 - threadIdx.x] * 2.0;
__syncthreads();
// Copy manipulated values back to global memory
array_int[threadIdx.x] = ai[threadIdx.x];
array_float[threadIdx.x] = af[threadIdx.x];
}
__host__ void Hanon_Exercise_17()
{
int array_int_host[4] = {1, 2, 3, 4};
float array_float_host[4] = {101., 102., 103., 104.};
int* array_int_dev = 0;
float* array_float_dev = 0;
hipMalloc((void**)&array_int_dev, 4 * sizeof(int));
hipMalloc((void**)&array_float_dev, 4 * sizeof(float));
hipMemcpy(array_int_dev, array_int_host, 4 * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(array_float_dev, array_float_host, 4 * sizeof(float), hipMemcpyHostToDevice);
/*Your implementation: comment back the following code with the correct specification for shared memory size (by replacing the * with a proper number) */
Reverse_And_Multiply_Two_Arrays_With_Extern_Shared <<< 1, 4, 4 * sizeof(int) +4 * sizeof(float) >>> (array_int_dev, 4, array_float_dev, 4);
// Copy results back to host memory
hipMemcpy(array_int_host, array_int_dev, 4 * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(array_float_host, array_float_dev, 4 * sizeof(float), hipMemcpyDeviceToHost);
cout << "Hanon exercise 17:\n";
for(int i = 0; i < 4; i++)cout << array_int_host[i] << ", ";
cout << endl;
for(int i = 0; i < 4; i++)cout << array_float_host[i] << ", ";
cout << endl;
out << "Hanon exercise 17:\n";
for(int i = 0; i < 4; i++)out << array_int_host[i] << ", ";
out << endl;
for(int i = 0; i < 4; i++)out << array_float_host[i] << ", ";
out << endl;
}
////Congratulations! You have finished all your Hanon exercises today!
//////////////////////////////////////////////////////////////////////////
void Hanon_Exercise_Test_Memory()
{
Hanon_Exercise_12();
Hanon_Exercise_13();
Hanon_Exercise_14();
Hanon_Exercise_15();
Hanon_Exercise_16();
Hanon_Exercise_17();
}
int main()
{
if(name::team == "Team_X")
{
printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n");
return 0;
}
std::string file_name = name::team + "_exercise_memory.dat";
out.open(file_name);
if(out.fail())
{
printf("\ncannot open file %s to record results\n", file_name.c_str());
return 0;
}
Hanon_Exercise_Test_Memory();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //////////////////////////////////////////////////////////////////////////
////This is the code implementation for Hanon finger exercise -- memory
////Dartmouth COSC89.25/189.03, GPU Programming and High-Performance Computing
//////////////////////////////////////////////////////////////////////////
#include <hip/hip_runtime.h>
#include <cstdio>
#include <vector>
#include <iostream>
#include <fstream>
using namespace std;
namespace name
{
std::string team = "Slim_Shaders";
std::string author_1 = "Andrw_Yang";
std::string author_2 = "Matthew_Kenney";
};
ofstream out;
//////////////////////////////////////////////////////////////////////////
////Hanon finger exercise for memory manipulations
////In this exercise you will practice the use of a set of CUDA memory APIs,
//// including cudaMalloc, cudaFree, cudaMemcpy, cudaMemcpyFrom(To)Symbol, and cudaGetSymbolAddress
const int a_host[8] = {1, 2, 3, 4, 5, 6, 7, 8}; ////a_host is an array on host
__device__ const int b_dev[8] = {101, 102, 103, 104, 105, 106, 107, 108}; ////b_dev is an array on device
////Hanon Exercise 12: practice cudaMalloc, cudaMemcpy, and cudaFree
////Expected output: copy a_host from host to device, add each of its elements by 1, store the results in result_host
////Hint:
////0) allocate an array on device with the same size as a_host;
////1) copy a_host from host to device;
////2) write a kernel function to carry out the incremental operation on device;
////3) copy the calculated results on device to result_host (on host)
////4) free the array on device
/*TODO: Your kernel function starts*/
__global__ void Hanon_kernel(int* to_increment)
{
int array_id = blockDim.x * blockIdx.x + threadIdx.x;
to_increment[array_id] = to_increment[array_id] + 1;
}
/*TODO: Your kernel function ends*/
__host__ void Hanon_Exercise_12()
{
int result_host[8] = {0};
int *a_dev = 0;
/*TODO: Your implementation starts*/
hipMalloc((void**)&a_dev, 8 * sizeof(int));
hipMemcpy(a_dev, a_host, 8 * sizeof(int), hipMemcpyHostToDevice);
Hanon_kernel <<< 1, 8, 8 * sizeof(int)>>>(a_dev);
hipMemcpy(result_host, a_dev, 8 * sizeof(int), hipMemcpyDeviceToHost);
hipFree(a_dev);
/*TODO: Your implementation ends*/
cout << "Hanon exercise 12:\n";
for(int i = 0; i < 8; i++)cout << result_host[i] << ", ";
cout << endl;
out << "Hanon exercise 12:\n";
for(int i = 0; i < 8; i++)out << result_host[i] << ", ";
out << endl;
}
////Hanon Exercise 13: practice cudaMemcpyFromSymbol
////Expected output: result_host={101,102,103,104,105,106,107,108}
////Process: copy b_dev (the static CUDA device array declared in line 35) to result_host by using cudaMemcpyFromSymbol.
////Hint: b_dev is in static (stack) memory, so you cannot use cudaMemcpy to manipulate it!
__host__ void Hanon_Exercise_13()
{
vector<int> result_host(8, 0);
/*TODO: Your implementation starts*/
hipMemcpyFromSymbol((void*)&result_host[0], HIP_SYMBOL(b_dev), 8 * sizeof(int));
/*TODO: Your implementation ends*/
cout << "Hanon exercise 13:\n";
for(int i = 0; i < 8; i++)cout << result_host[i] << ", ";
cout << endl;
out << "Hanon exercise 13:\n";
for(int i = 0; i < 8; i++)out << result_host[i] << ", ";
out << endl;
}
////Hanon Exercise 14: practice manipulating dynamic and static memories together
////Expected output: result_host={101+1,102+2,103+3,104+4,105+5,106+6,107+7,108+8}
////Process: calculate a_host+b_dev (element-wise sum) on device and store the results in result_host
////Hint:
////1) transferring a_host from host to device;
////2) write a kernel function to carry out the element-wise sum for arrays a_host and b_dev
////3) transfer the results from device to result_host (on host)
/*TODO: Your kernel function starts*/
__global__ void Hanon_kernel_14(int* to_increment)
{
int array_id = blockDim.x * blockIdx.x + threadIdx.x;
to_increment[array_id] = to_increment[array_id] + b_dev[array_id];
}
/*TODO: Your kernel function ends*/
__host__ void Hanon_Exercise_14()
{
int result_host[8] = {0};
int *a_dev = 0;
/*TODO: Your implementation starts*/
hipMalloc((void**)&a_dev, 8 * sizeof(int));
hipMemcpy(a_dev, a_host, 8 * sizeof(int), hipMemcpyHostToDevice);
Hanon_kernel_14 <<< 1, 8, 8 * sizeof(int)>>>(a_dev);
hipMemcpy(result_host, a_dev, 8 * sizeof(int), hipMemcpyDeviceToHost);
hipFree(a_dev);
/*TODO: Your host function implementation ends*/
cout << "Hanon exercise 14:\n";
for(int i = 0; i < 8; i++)cout << result_host[i] << ", ";
cout << endl;
out << "Hanon exercise 14:\n";
for(int i = 0; i < 8; i++)out << result_host[i] << ", ";
out << endl;
}
////Hanon Exercise 15: practice using shared memory
////Expected output: result_host={1*0+101,2*2+102,3*4+103,4*6+104,5*8+105,6*10+106,7*12+107,8*14+108}
////Process: calculate a_host*s+b_dev and store results in result_host. Here s is an array initialized in shared memory of the kernel function (line 111-113)
////Hint: You need to modify the arguments and the implementation of the function Calculate_Array_With_Shared() to pass in your array(s) and perform calculations
__global__ void Calculate_Array_With_Shared(int* array_from_host) /*TODO: modify the arguments of the kernel function*/
{
__shared__ int s[8];
s[threadIdx.x] = 2 * threadIdx.x;
__syncthreads();
/*TODO: Your kernel implementation starts*/
s[threadIdx.x] = s[threadIdx.x] * array_from_host[threadIdx.x] + b_dev[threadIdx.x];
__syncthreads();
array_from_host[threadIdx.x] = s[threadIdx.x];
/*TODO: Your kernel implementation ends*/
}
__host__ void Hanon_Exercise_15()
{
/*TODO: Your host function implementation starts*/
int result_host[8] = {0};
int *a_dev = 0;
hipMalloc((void**)&a_dev, 8 * sizeof(int));
hipMemcpy(a_dev, a_host, 8 * sizeof(int), hipMemcpyHostToDevice);
Calculate_Array_With_Shared <<< 1, 8, 8 * sizeof(int)>>>(a_dev);
hipMemcpy(result_host, a_dev, 8 * sizeof(int), hipMemcpyDeviceToHost);
hipFree(a_dev);
/*TODO: Your host function implementation ends*/
cout << "Hanon exercise 15:\n";
for(int i = 0; i < 8; i++)cout << result_host[i] << ", ";
cout << endl;
out << "Hanon exercise 15:\n";
for(int i = 0; i < 8; i++)out << result_host[i] << ", ";
out << endl;
}
////Hanon Exercise 16: practice cudaGetSymbolAddress
////Expected output: result_host={101*16+1,102*16+1,103*16+1,...,108*16+1}
////Process: apply the following kernel function Manipulate_Array() onto b_dev and store the results in result_host
////*WITHOUT* modifying the implementation in Manipulate_Array() (call it as a blackbox)
////Hint: b_dev is a static array on GPU, you need to get its dynamic pointer by calling cudaGetSymbolAddress, and then send this pointer into the kernel function to update its values
////Note: You are not allowed to modify the implementation in this function!
__global__ void Manipulate_Array(int* array)
{
array[threadIdx.x] *= 16;
array[threadIdx.x] += 1;
}
__host__ void Hanon_Exercise_16()
{
int result_host[8] = {0};
/*TODO: Your host function implementation starts*/
void* devPtr = 0;
hipGetSymbolAddress(&devPtr, HIP_SYMBOL(b_dev)); // get address of b_dev constant
Manipulate_Array <<< 1, 8, 8 * sizeof(int)>>>((int*)devPtr); // Call Kernel function
hipMemcpyFromSymbol(result_host, HIP_SYMBOL(b_dev), sizeof(int) * 8); // Load the result into result_host
/*TODO: Your host function implementation ends*/
cout << "Hanon exercise 16:\n";
for(int i = 0; i < 8; i++)cout << result_host[i] << ", ";
cout << endl;
out << "Hanon exercise 16:\n";
for(int i = 0; i < 8; i++)out << result_host[i] << ", ";
out << endl;
}
////Hanon Exercise 17: practice using shared memory with multiple array types
////Expected output: array_int={208,206,204,202}, array_float={8.,6.,4.,2.},
//// i.e., reverse the order of the int array, multiply each element by 2, and copy its values to the float array (by type conversion),
//// and reverse the order of the float array, multiply each element by 2, and copy its values to the int array (by type conversion)
//// You need to implement this process by using a piece of shared memory holding both two arrays
////Hint: read the sample code we went through in class on Thursday, and mimic its steps as
////1. Initialize two array pointers with the types of int and float to different addresses of the shared memory
////2. Copy the values from array_int and array_float to the proper elements in shared memory
////3. synchronize threads
////4. Copy the values with the proper order and rescaling factor from each array in shared memory to global memory (array_int and array_float)
__global__ void Reverse_And_Multiply_Two_Arrays_With_Extern_Shared(int* array_int, const size_t array_int_size, float* array_float, const size_t array_float_size)
{
extern __shared__ int shared_mem[];
int* ai = (int*)&shared_mem[0];
float* af = (float*)&shared_mem[array_int_size];
/*Your implementation*/
__syncthreads();
// swap the types as we pass into shared memory
ai[threadIdx.x] = (int) array_float[array_float_size - 1 - threadIdx.x] * 2;
af[threadIdx.x] = (float) array_int[array_int_size - 1 - threadIdx.x] * 2.0;
__syncthreads();
// Copy manipulated values back to global memory
array_int[threadIdx.x] = ai[threadIdx.x];
array_float[threadIdx.x] = af[threadIdx.x];
}
__host__ void Hanon_Exercise_17()
{
int array_int_host[4] = {1, 2, 3, 4};
float array_float_host[4] = {101., 102., 103., 104.};
int* array_int_dev = 0;
float* array_float_dev = 0;
hipMalloc((void**)&array_int_dev, 4 * sizeof(int));
hipMalloc((void**)&array_float_dev, 4 * sizeof(float));
hipMemcpy(array_int_dev, array_int_host, 4 * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(array_float_dev, array_float_host, 4 * sizeof(float), hipMemcpyHostToDevice);
/*Your implementation: comment back the following code with the correct specification for shared memory size (by replacing the * with a proper number) */
Reverse_And_Multiply_Two_Arrays_With_Extern_Shared <<< 1, 4, 4 * sizeof(int) +4 * sizeof(float) >>> (array_int_dev, 4, array_float_dev, 4);
// Copy results back to host memory
hipMemcpy(array_int_host, array_int_dev, 4 * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(array_float_host, array_float_dev, 4 * sizeof(float), hipMemcpyDeviceToHost);
cout << "Hanon exercise 17:\n";
for(int i = 0; i < 4; i++)cout << array_int_host[i] << ", ";
cout << endl;
for(int i = 0; i < 4; i++)cout << array_float_host[i] << ", ";
cout << endl;
out << "Hanon exercise 17:\n";
for(int i = 0; i < 4; i++)out << array_int_host[i] << ", ";
out << endl;
for(int i = 0; i < 4; i++)out << array_float_host[i] << ", ";
out << endl;
}
////Congratulations! You have finished all your Hanon exercises today!
//////////////////////////////////////////////////////////////////////////
void Hanon_Exercise_Test_Memory()
{
Hanon_Exercise_12();
Hanon_Exercise_13();
Hanon_Exercise_14();
Hanon_Exercise_15();
Hanon_Exercise_16();
Hanon_Exercise_17();
}
int main()
{
if(name::team == "Team_X")
{
printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n");
return 0;
}
std::string file_name = name::team + "_exercise_memory.dat";
out.open(file_name);
if(out.fail())
{
printf("\ncannot open file %s to record results\n", file_name.c_str());
return 0;
}
Hanon_Exercise_Test_Memory();
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12Hanon_kernelPi
.globl _Z12Hanon_kernelPi
.p2align 8
.type _Z12Hanon_kernelPi,@function
_Z12Hanon_kernelPi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x14
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, 1, v2
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12Hanon_kernelPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12Hanon_kernelPi, .Lfunc_end0-_Z12Hanon_kernelPi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z15Hanon_kernel_14Pi
.globl _Z15Hanon_kernel_14Pi
.p2align 8
.type _Z15Hanon_kernel_14Pi,@function
_Z15Hanon_kernel_14Pi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x14
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_getpc_b64 s[2:3]
s_add_u32 s2, s2, _ZL5b_dev@rel32@lo+4
s_addc_u32 s3, s3, _ZL5b_dev@rel32@hi+12
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, v0, s2
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v4, v[2:3], off
global_load_b32 v0, v[0:1], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v0, v0, v4
global_store_b32 v[2:3], v0, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15Hanon_kernel_14Pi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z15Hanon_kernel_14Pi, .Lfunc_end1-_Z15Hanon_kernel_14Pi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z27Calculate_Array_With_SharedPi
.globl _Z27Calculate_Array_With_SharedPi
.p2align 8
.type _Z27Calculate_Array_With_SharedPi,@function
_Z27Calculate_Array_With_SharedPi:
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v1, 1, v0
v_lshlrev_b32_e32 v3, 2, v0
s_getpc_b64 s[2:3]
s_add_u32 s2, s2, _ZL5b_dev@rel32@lo+4
s_addc_u32 s3, s3, _ZL5b_dev@rel32@hi+12
ds_store_b32 v3, v1
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_clause 0x1
global_load_b32 v4, v3, s[0:1]
global_load_b32 v0, v3, s[2:3]
ds_load_b32 v5, v3
s_waitcnt vmcnt(0) lgkmcnt(0)
v_mad_u64_u32 v[1:2], null, v4, v5, v[0:1]
ds_store_b32 v3, v1
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v0, v3
s_waitcnt lgkmcnt(0)
global_store_b32 v3, v0, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z27Calculate_Array_With_SharedPi
.amdhsa_group_segment_fixed_size 32
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 4
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z27Calculate_Array_With_SharedPi, .Lfunc_end2-_Z27Calculate_Array_With_SharedPi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z16Manipulate_ArrayPi
.globl _Z16Manipulate_ArrayPi
.p2align 8
.type _Z16Manipulate_ArrayPi,@function
_Z16Manipulate_ArrayPi:
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
global_load_b32 v1, v0, s[0:1]
s_waitcnt vmcnt(0)
v_lshl_or_b32 v1, v1, 4, 1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16Manipulate_ArrayPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 2
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z16Manipulate_ArrayPi, .Lfunc_end3-_Z16Manipulate_ArrayPi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm
.globl _Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm
.p2align 8
.type _Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm,@function
_Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm:
s_load_b256 s[0:7], s[0:1], 0x0
v_not_b32_e32 v1, v0
v_lshlrev_b32_e32 v0, 2, v0
v_mov_b32_e32 v2, -1
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_lshl_b64 s[6:7], s[6:7], 2
s_lshl_b32 s8, s2, 2
s_add_u32 s6, s6, s4
s_addc_u32 s7, s7, s5
s_lshl_b64 s[2:3], s[2:3], 2
v_add_co_u32 v3, vcc_lo, s6, v1
s_add_u32 s2, s2, s0
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v2, vcc_lo
s_addc_u32 s3, s3, s1
v_add_co_u32 v1, vcc_lo, s2, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s3, v2, vcc_lo
global_load_b32 v3, v[3:4], off
global_load_b32 v1, v[1:2], off
s_waitcnt vmcnt(1)
v_cvt_i32_f32_e32 v2, v3
s_waitcnt vmcnt(0)
v_cvt_f32_i32_e32 v1, v1
s_delay_alu instid0(VALU_DEP_1)
v_dual_add_f32 v1, v1, v1 :: v_dual_lshlrev_b32 v2, 1, v2
v_add_nc_u32_e32 v3, 0, v0
v_add3_u32 v4, 0, s8, v0
ds_store_b32 v3, v2
ds_store_b32 v4, v1
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v1, v3
ds_load_b32 v2, v4
s_waitcnt lgkmcnt(1)
global_store_b32 v0, v1, s[0:1]
s_waitcnt lgkmcnt(0)
global_store_b32 v0, v2, s[4:5]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 9
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end4:
.size _Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm, .Lfunc_end4-_Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type _ZL5b_dev,@object
.section .rodata.cst32,"aM",@progbits,32
.p2align 4, 0x0
_ZL5b_dev:
.long 101
.long 102
.long 103
.long 104
.long 105
.long 106
.long 107
.long 108
.size _ZL5b_dev, 32
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12Hanon_kernelPi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12Hanon_kernelPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15Hanon_kernel_14Pi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15Hanon_kernel_14Pi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 32
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z27Calculate_Array_With_SharedPi
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z27Calculate_Array_With_SharedPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16Manipulate_ArrayPi
.private_segment_fixed_size: 0
.sgpr_count: 2
.sgpr_spill_count: 0
.symbol: _Z16Manipulate_ArrayPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 8
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm
.private_segment_fixed_size: 0
.sgpr_count: 11
.sgpr_spill_count: 0
.symbol: _Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R13, SR_TID.X ; /* 0x00000000000d7919 */
/* 0x000e220000002100 */
/*0020*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff067624 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff057624 */
/* 0x000fe200078e00ff */
/*0050*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0060*/ LOP3.LUT R0, RZ, R13, RZ, 0x33, !PT ; /* 0x0000000dff007212 */
/* 0x001fc800078e33ff */
/*0070*/ IADD3 R3, P0, R0.reuse, c[0x0][0x178], RZ ; /* 0x00005e0000037a10 */
/* 0x040fe40007f1e0ff */
/*0080*/ IADD3 R0, P1, R0, c[0x0][0x168], RZ ; /* 0x00005a0000007a10 */
/* 0x000fe40007f3e0ff */
/*0090*/ IADD3.X R6, R6, -0x1, RZ, P0, !PT ; /* 0xffffffff06067810 */
/* 0x000fe400007fe4ff */
/*00a0*/ IADD3.X R5, R5, -0x1, RZ, P1, !PT ; /* 0xffffffff05057810 */
/* 0x000fe40000ffe4ff */
/*00b0*/ LEA R2, P0, R3, c[0x0][0x170], 0x2 ; /* 0x00005c0003027a11 */
/* 0x000fe400078010ff */
/*00c0*/ LEA R4, P1, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000047a11 */
/* 0x000fc400078210ff */
/*00d0*/ LEA.HI.X R3, R3, c[0x0][0x174], R6, 0x2, P0 ; /* 0x00005d0003037a11 */
/* 0x000fe400000f1406 */
/*00e0*/ LEA.HI.X R5, R0, c[0x0][0x164], R5, 0x2, P1 ; /* 0x0000590000057a11 */
/* 0x000fc600008f1405 */
/*00f0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ee2000c1e1900 */
/*0110*/ SHF.L.U32 R6, R13, 0x2, RZ ; /* 0x000000020d067819 */
/* 0x000fe200000006ff */
/*0120*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff097624 */
/* 0x000fc800078e00ff */
/*0130*/ IMAD R6, R9, 0x4, R6 ; /* 0x0000000409067824 */
/* 0x000fe200078e0206 */
/*0140*/ F2I.TRUNC.NTZ R0, R2 ; /* 0x0000000200007305 */
/* 0x004e30000020f100 */
/*0150*/ I2F R7, R4 ; /* 0x0000000400077306 */
/* 0x0082a20000201400 */
/*0160*/ IMAD.SHL.U32 R0, R0, 0x2, RZ ; /* 0x0000000200007824 */
/* 0x001fe200078e00ff */
/*0170*/ HFMA2.MMA R4, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff047435 */
/* 0x002fc800000001ff */
/*0180*/ STS [R13.X4], R0 ; /* 0x000000000d007388 */
/* 0x000fe20000004800 */
/*0190*/ FADD R7, R7, R7 ; /* 0x0000000707077221 */
/* 0x004fca0000000000 */
/*01a0*/ IMAD.WIDE.U32 R2, R13.reuse, R4.reuse, c[0x0][0x160] ; /* 0x000058000d027625 */
/* 0x0c0fe200078e0004 */
/*01b0*/ STS [R6], R7 ; /* 0x0000000706007388 */
/* 0x000fe60000000800 */
/*01c0*/ IMAD.WIDE.U32 R4, R13, R4, c[0x0][0x170] ; /* 0x00005c000d047625 */
/* 0x000fe200078e0004 */
/*01d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*01e0*/ LDS R9, [R13.X4] ; /* 0x000000000d097984 */
/* 0x000e280000004800 */
/*01f0*/ LDS R11, [R6] ; /* 0x00000000060b7984 */
/* 0x000e680000000800 */
/*0200*/ STG.E [R2.64], R9 ; /* 0x0000000902007986 */
/* 0x001fe8000c101904 */
/*0210*/ STG.E [R4.64], R11 ; /* 0x0000000b04007986 */
/* 0x002fe2000c101904 */
/*0220*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0230*/ BRA 0x230; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z16Manipulate_ArrayPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x001fca00078e0003 */
/*0050*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*0060*/ LEA R5, R0, 0x1, 0x4 ; /* 0x0000000100057811 */
/* 0x004fca00078e20ff */
/*0070*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0080*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0090*/ BRA 0x90; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z27Calculate_Array_With_SharedPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R2, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff027435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ SHF.L.U32 R0, R9, 0x1, RZ ; /* 0x0000000109007819 */
/* 0x001fd000000006ff */
/*0050*/ IMAD.WIDE.U32 R4, R9.reuse, R2.reuse, c[0x4][0x0] ; /* 0x0100000009047625 */
/* 0x0c0fe200078e0002 */
/*0060*/ STS [R9.X4], R0 ; /* 0x0000000009007388 */
/* 0x000fe60000004800 */
/*0070*/ IMAD.WIDE.U32 R2, R9, R2, c[0x0][0x160] ; /* 0x0000580009027625 */
/* 0x000fe200078e0002 */
/*0080*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0090*/ LDG.E.CONSTANT R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e9900 */
/*00a0*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDS R6, [R9.X4] ; /* 0x0000000009067984 */
/* 0x000ea40000004800 */
/*00c0*/ IMAD R6, R6, R7, R4 ; /* 0x0000000706067224 */
/* 0x004fca00078e0204 */
/*00d0*/ STS [R9.X4], R6 ; /* 0x0000000609007388 */
/* 0x000fe80000004800 */
/*00e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*00f0*/ LDS R7, [R9.X4] ; /* 0x0000000009077984 */
/* 0x000e280000004800 */
/*0100*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x001fe2000c101904 */
/*0110*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0120*/ BRA 0x120; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z15Hanon_kernel_14Pi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R4, R2, R7, c[0x4][0x0] ; /* 0x0100000002047625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R2, R2, R7, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fe400078e0207 */
/*0080*/ LDG.E.CONSTANT R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea8000c1e9900 */
/*0090*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ IADD3 R7, R0, R5, RZ ; /* 0x0000000500077210 */
/* 0x004fca0007ffe0ff */
/*00b0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x000fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z12Hanon_kernelPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0205 */
/*0070*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*0080*/ IADD3 R5, R0, 0x1, RZ ; /* 0x0000000100057810 */
/* 0x004fca0007ffe0ff */
/*0090*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12Hanon_kernelPi
.globl _Z12Hanon_kernelPi
.p2align 8
.type _Z12Hanon_kernelPi,@function
_Z12Hanon_kernelPi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x14
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, 1, v2
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12Hanon_kernelPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12Hanon_kernelPi, .Lfunc_end0-_Z12Hanon_kernelPi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z15Hanon_kernel_14Pi
.globl _Z15Hanon_kernel_14Pi
.p2align 8
.type _Z15Hanon_kernel_14Pi,@function
_Z15Hanon_kernel_14Pi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x14
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_getpc_b64 s[2:3]
s_add_u32 s2, s2, _ZL5b_dev@rel32@lo+4
s_addc_u32 s3, s3, _ZL5b_dev@rel32@hi+12
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, v0, s2
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v4, v[2:3], off
global_load_b32 v0, v[0:1], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v0, v0, v4
global_store_b32 v[2:3], v0, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15Hanon_kernel_14Pi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z15Hanon_kernel_14Pi, .Lfunc_end1-_Z15Hanon_kernel_14Pi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z27Calculate_Array_With_SharedPi
.globl _Z27Calculate_Array_With_SharedPi
.p2align 8
.type _Z27Calculate_Array_With_SharedPi,@function
_Z27Calculate_Array_With_SharedPi:
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v1, 1, v0
v_lshlrev_b32_e32 v3, 2, v0
s_getpc_b64 s[2:3]
s_add_u32 s2, s2, _ZL5b_dev@rel32@lo+4
s_addc_u32 s3, s3, _ZL5b_dev@rel32@hi+12
ds_store_b32 v3, v1
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_clause 0x1
global_load_b32 v4, v3, s[0:1]
global_load_b32 v0, v3, s[2:3]
ds_load_b32 v5, v3
s_waitcnt vmcnt(0) lgkmcnt(0)
v_mad_u64_u32 v[1:2], null, v4, v5, v[0:1]
ds_store_b32 v3, v1
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v0, v3
s_waitcnt lgkmcnt(0)
global_store_b32 v3, v0, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z27Calculate_Array_With_SharedPi
.amdhsa_group_segment_fixed_size 32
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 4
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z27Calculate_Array_With_SharedPi, .Lfunc_end2-_Z27Calculate_Array_With_SharedPi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z16Manipulate_ArrayPi
.globl _Z16Manipulate_ArrayPi
.p2align 8
.type _Z16Manipulate_ArrayPi,@function
_Z16Manipulate_ArrayPi:
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
global_load_b32 v1, v0, s[0:1]
s_waitcnt vmcnt(0)
v_lshl_or_b32 v1, v1, 4, 1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16Manipulate_ArrayPi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 2
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z16Manipulate_ArrayPi, .Lfunc_end3-_Z16Manipulate_ArrayPi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm
.globl _Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm
.p2align 8
.type _Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm,@function
_Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm:
s_load_b256 s[0:7], s[0:1], 0x0
v_not_b32_e32 v1, v0
v_lshlrev_b32_e32 v0, 2, v0
v_mov_b32_e32 v2, -1
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_lshl_b64 s[6:7], s[6:7], 2
s_lshl_b32 s8, s2, 2
s_add_u32 s6, s6, s4
s_addc_u32 s7, s7, s5
s_lshl_b64 s[2:3], s[2:3], 2
v_add_co_u32 v3, vcc_lo, s6, v1
s_add_u32 s2, s2, s0
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v2, vcc_lo
s_addc_u32 s3, s3, s1
v_add_co_u32 v1, vcc_lo, s2, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s3, v2, vcc_lo
global_load_b32 v3, v[3:4], off
global_load_b32 v1, v[1:2], off
s_waitcnt vmcnt(1)
v_cvt_i32_f32_e32 v2, v3
s_waitcnt vmcnt(0)
v_cvt_f32_i32_e32 v1, v1
s_delay_alu instid0(VALU_DEP_1)
v_dual_add_f32 v1, v1, v1 :: v_dual_lshlrev_b32 v2, 1, v2
v_add_nc_u32_e32 v3, 0, v0
v_add3_u32 v4, 0, s8, v0
ds_store_b32 v3, v2
ds_store_b32 v4, v1
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v1, v3
ds_load_b32 v2, v4
s_waitcnt lgkmcnt(1)
global_store_b32 v0, v1, s[0:1]
s_waitcnt lgkmcnt(0)
global_store_b32 v0, v2, s[4:5]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 9
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end4:
.size _Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm, .Lfunc_end4-_Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type _ZL5b_dev,@object
.section .rodata.cst32,"aM",@progbits,32
.p2align 4, 0x0
_ZL5b_dev:
.long 101
.long 102
.long 103
.long 104
.long 105
.long 106
.long 107
.long 108
.size _ZL5b_dev, 32
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12Hanon_kernelPi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12Hanon_kernelPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15Hanon_kernel_14Pi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15Hanon_kernel_14Pi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 32
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z27Calculate_Array_With_SharedPi
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z27Calculate_Array_With_SharedPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16Manipulate_ArrayPi
.private_segment_fixed_size: 0
.sgpr_count: 2
.sgpr_spill_count: 0
.symbol: _Z16Manipulate_ArrayPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 8
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm
.private_segment_fixed_size: 0
.sgpr_count: 11
.sgpr_spill_count: 0
.symbol: _Z50Reverse_And_Multiply_Two_Arrays_With_Extern_SharedPimPfm.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <algorithm>
#include <cstdlib>
#include <iostream>
int main(void)
{
    // generate 32M random numbers serially
    // (rand() is unseeded here, so the sequence is deterministic across runs)
    thrust::host_vector<int> h_vec(32 << 20);
    std::generate(h_vec.begin(), h_vec.end(), rand);
    // keep an untouched host-side copy to validate the round trip below
    thrust::host_vector<int> h_check = h_vec;
    // transfer data to the device
    thrust::device_vector<int> d_vec = h_vec;
    // transfer data back to host
    thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
    // verify the H2D + D2H round trip preserved every element;
    // flag stays 0 only if all elements match the pristine copy
    uint8_t flag = 0;
    for ( size_t i = 0; i < h_vec.size(); i++ )
        if ( h_vec[ i ] != h_check[ i ] )
        {
            std::cerr << "Vector check error!\n";
            flag = 1;
            break;
        }
    if ( flag == 0 )
        std::cout << "Vector check OK!\n";
    // sort data on the device (846M keys per second on GeForce GTX 480)
    thrust::sort(d_vec.begin(), d_vec.end());
    // transfer data back to host
    // (thrust::copy blocks until the device work completes, so no explicit sync is needed)
    thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
    return 0;
}
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <algorithm>
#include <cstdlib>
#include <iostream>
int main(void)
{
    // generate 32M random numbers serially
    // (rand() is unseeded here, so the sequence is deterministic across runs)
    thrust::host_vector<int> h_vec(32 << 20);
    std::generate(h_vec.begin(), h_vec.end(), rand);
    // keep an untouched host-side copy to validate the round trip below
    thrust::host_vector<int> h_check = h_vec;
    // transfer data to the device
    thrust::device_vector<int> d_vec = h_vec;
    // transfer data back to host
    thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
    // verify the H2D + D2H round trip preserved every element;
    // flag stays 0 only if all elements match the pristine copy
    uint8_t flag = 0;
    for ( size_t i = 0; i < h_vec.size(); i++ )
        if ( h_vec[ i ] != h_check[ i ] )
        {
            std::cerr << "Vector check error!\n";
            flag = 1;
            break;
        }
    if ( flag == 0 )
        std::cout << "Vector check OK!\n";
    // sort data on the device (846M keys per second on GeForce GTX 480)
    thrust::sort(d_vec.begin(), d_vec.end());
    // transfer data back to host
    // (thrust::copy blocks until the device work completes, so no explicit sync is needed)
    thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
    return 0;
}
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void OPT_1_HIST(int* lcm, int* hist, int n) {
//
int vertex = blockIdx.x;
int vcomp = threadIdx.x;
bool equal;
//
__shared__ int cval;
//
if(vcomp == 0)
cval = 0;
__syncthreads();
//
if(vertex < n && vcomp < n)
for(int i = vcomp; i < n; i += blockDim.x) {
if(vertex == i) {
atomicAdd(&cval, 1);
continue;
}
equal = false;
for(int j = 0; j < n; j++) {
if(lcm[vertex*n + j] == lcm[i*n + j])
equal = true;
else {
equal = false;
break;
}
}
if(equal)
atomicAdd(&cval, 1);
}
__syncthreads();
if(vertex < n && vcomp == 0 && cval > 0) {
atomicAdd(&hist[cval], 1);
//printf("\nv%d: %d\n", vertex, cval);
}
} | code for sm_80
Function : _Z10OPT_1_HISTPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R9, SR_CTAID.X ; /* 0x0000000000097919 */
/* 0x000e620000002500 */
/*0040*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x001fe20003f05270 */
/*0050*/ IMAD.MOV.U32 R0, RZ, RZ, R6 ; /* 0x000000ffff007224 */
/* 0x000fd800078e0006 */
/*0060*/ @!P0 STS [RZ], RZ ; /* 0x000000ffff008388 */
/* 0x000fe80000000800 */
/*0070*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0080*/ ISETP.GE.AND P0, PT, R9, c[0x0][0x170], PT ; /* 0x00005c0009007a0c */
/* 0x002fc80003f06270 */
/*0090*/ ISETP.GE.OR P1, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */
/* 0x000fda0000726670 */
/*00a0*/ @P1 BRA 0x320 ; /* 0x0000027000001947 */
/* 0x000fea0003800000 */
/*00b0*/ ISETP.LT.AND P1, PT, RZ, c[0x0][0x170], PT ; /* 0x00005c00ff007a0c */
/* 0x000fda0003f21270 */
/*00c0*/ @P1 BRA 0x170 ; /* 0x000000a000001947 */
/* 0x000fea0003800000 */
/*00d0*/ ISETP.NE.AND P1, PT, R9, R0, PT ; /* 0x000000000900720c */
/* 0x000fe20003f25270 */
/*00e0*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe20003800000 */
/*00f0*/ IADD3 R0, R0, c[0x0][0x0], RZ ; /* 0x0000000000007a10 */
/* 0x000fe20007ffe0ff */
/*0100*/ BSSY B0, 0x150 ; /* 0x0000004000007945 */
/* 0x000fe60003800000 */
/*0110*/ ISETP.GE.AND P2, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fce0003f46270 */
/*0120*/ @P1 BRA 0x140 ; /* 0x0000001000001947 */
/* 0x000fea0003800000 */
/*0130*/ ATOMS.POPC.INC.32 RZ, [URZ] ; /* 0x00000000ffff7f8c */
/* 0x000fe4000d00003f */
/*0140*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0150*/ @!P2 BRA 0xd0 ; /* 0xffffff700000a947 */
/* 0x000fea000383ffff */
/*0160*/ BRA 0x320 ; /* 0x000001b000007947 */
/* 0x000fea0003800000 */
/*0170*/ ISETP.NE.AND P1, PT, R9, R0, PT ; /* 0x000000000900720c */
/* 0x000fe20003f25270 */
/*0180*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe20003800000 */
/*0190*/ BSSY B0, 0x2f0 ; /* 0x0000015000007945 */
/* 0x000ff60003800000 */
/*01a0*/ @!P1 BRA 0x2d0 ; /* 0x0000012000009947 */
/* 0x000fea0003800000 */
/*01b0*/ BSSY B1, 0x2b0 ; /* 0x000000f000017945 */
/* 0x000fe20003800000 */
/*01c0*/ IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff077224 */
/* 0x000fc400078e00ff */
/*01d0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe400078e00ff */
/*01e0*/ IMAD R2, R9, c[0x0][0x170], R7 ; /* 0x00005c0009027a24 */
/* 0x000fc400078e0207 */
/*01f0*/ IMAD R4, R0, c[0x0][0x170], R7 ; /* 0x00005c0000047a24 */
/* 0x000fe400078e0207 */
/*0200*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fc800078e0205 */
/*0210*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe400078e0205 */
/*0220*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*0230*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea4000c1e1900 */
/*0240*/ ISETP.NE.AND P1, PT, R3, R4, PT ; /* 0x000000040300720c */
/* 0x004fda0003f25270 */
/*0250*/ @P1 BREAK B1 ; /* 0x0000000000011942 */
/* 0x000fe20003800000 */
/*0260*/ @P1 BRA 0x2e0 ; /* 0x0000007000001947 */
/* 0x000fea0003800000 */
/*0270*/ IADD3 R7, R7, 0x1, RZ ; /* 0x0000000107077810 */
/* 0x000fc80007ffe0ff */
/*0280*/ ISETP.GE.AND P1, PT, R7, c[0x0][0x170], PT ; /* 0x00005c0007007a0c */
/* 0x000fda0003f26270 */
/*0290*/ @!P1 BRA 0x1d0 ; /* 0xffffff3000009947 */
/* 0x000fea000383ffff */
/*02a0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*02b0*/ ATOMS.POPC.INC.32 RZ, [URZ] ; /* 0x00000000ffff7f8c */
/* 0x000fe2000d00003f */
/*02c0*/ BRA 0x2e0 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*02d0*/ ATOMS.POPC.INC.32 RZ, [URZ] ; /* 0x00000000ffff7f8c */
/* 0x000fe4000d00003f */
/*02e0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*02f0*/ IADD3 R0, R0, c[0x0][0x0], RZ ; /* 0x0000000000007a10 */
/* 0x000fc80007ffe0ff */
/*0300*/ ISETP.GE.AND P1, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f26270 */
/*0310*/ @!P1 BRA 0x170 ; /* 0xfffffe5000009947 */
/* 0x000fea000383ffff */
/*0320*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe40003800000 */
/*0330*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0340*/ ISETP.NE.OR P0, PT, R6, RZ, P0 ; /* 0x000000ff0600720c */
/* 0x000fca0000705670 */
/*0350*/ LDS R2, [RZ] ; /* 0x00000000ff027984 */
/* 0x000e240000000800 */
/*0360*/ ISETP.LT.OR P0, PT, R2, 0x1, P0 ; /* 0x000000010200780c */
/* 0x001fda0000701670 */
/*0370*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0380*/ S2R R0, SR_LANEID ; /* 0x0000000000007919 */
/* 0x000e220000000000 */
/*0390*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */
/* 0x000fe200038e0100 */
/*03a0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*03b0*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */
/* 0x000fe200080e0000 */
/*03c0*/ POPC R5, UR6 ; /* 0x0000000600057d09 */
/* 0x000e640008000000 */
/*03d0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fc600078e0203 */
/*03e0*/ ISETP.EQ.U32.AND P0, PT, R0, UR7, PT ; /* 0x0000000700007c0c */
/* 0x001fda000bf02070 */
/*03f0*/ @P0 RED.E.ADD.STRONG.GPU [R2.64], R5 ; /* 0x000000050200098e */
/* 0x002fe2000c10e184 */
/*0400*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0410*/ BRA 0x410; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0480*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0490*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void OPT_1_HIST(int* lcm, int* hist, int n) {
//
int vertex = blockIdx.x;
int vcomp = threadIdx.x;
bool equal;
//
__shared__ int cval;
//
if(vcomp == 0)
cval = 0;
__syncthreads();
//
if(vertex < n && vcomp < n)
for(int i = vcomp; i < n; i += blockDim.x) {
if(vertex == i) {
atomicAdd(&cval, 1);
continue;
}
equal = false;
for(int j = 0; j < n; j++) {
if(lcm[vertex*n + j] == lcm[i*n + j])
equal = true;
else {
equal = false;
break;
}
}
if(equal)
atomicAdd(&cval, 1);
}
__syncthreads();
if(vertex < n && vcomp == 0 && cval > 0) {
atomicAdd(&hist[cval], 1);
//printf("\nv%d: %d\n", vertex, cval);
}
} | .file "tmpxft_0012104b_00000000-6_OPT_1_HIST.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z10OPT_1_HISTPiS_iPiS_i
.type _Z33__device_stub__Z10OPT_1_HISTPiS_iPiS_i, @function
_Z33__device_stub__Z10OPT_1_HISTPiS_iPiS_i:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10OPT_1_HISTPiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z33__device_stub__Z10OPT_1_HISTPiS_iPiS_i, .-_Z33__device_stub__Z10OPT_1_HISTPiS_iPiS_i
.globl _Z10OPT_1_HISTPiS_i
.type _Z10OPT_1_HISTPiS_i, @function
_Z10OPT_1_HISTPiS_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z10OPT_1_HISTPiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z10OPT_1_HISTPiS_i, .-_Z10OPT_1_HISTPiS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z10OPT_1_HISTPiS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z10OPT_1_HISTPiS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void OPT_1_HIST(int* lcm, int* hist, int n) {
//
int vertex = blockIdx.x;
int vcomp = threadIdx.x;
bool equal;
//
__shared__ int cval;
//
if(vcomp == 0)
cval = 0;
__syncthreads();
//
if(vertex < n && vcomp < n)
for(int i = vcomp; i < n; i += blockDim.x) {
if(vertex == i) {
atomicAdd(&cval, 1);
continue;
}
equal = false;
for(int j = 0; j < n; j++) {
if(lcm[vertex*n + j] == lcm[i*n + j])
equal = true;
else {
equal = false;
break;
}
}
if(equal)
atomicAdd(&cval, 1);
}
__syncthreads();
if(vertex < n && vcomp == 0 && cval > 0) {
atomicAdd(&hist[cval], 1);
//printf("\nv%d: %d\n", vertex, cval);
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void OPT_1_HIST(int* lcm, int* hist, int n) {
//
int vertex = blockIdx.x;
int vcomp = threadIdx.x;
bool equal;
//
__shared__ int cval;
//
if(vcomp == 0)
cval = 0;
__syncthreads();
//
if(vertex < n && vcomp < n)
for(int i = vcomp; i < n; i += blockDim.x) {
if(vertex == i) {
atomicAdd(&cval, 1);
continue;
}
equal = false;
for(int j = 0; j < n; j++) {
if(lcm[vertex*n + j] == lcm[i*n + j])
equal = true;
else {
equal = false;
break;
}
}
if(equal)
atomicAdd(&cval, 1);
}
__syncthreads();
if(vertex < n && vcomp == 0 && cval > 0) {
atomicAdd(&hist[cval], 1);
//printf("\nv%d: %d\n", vertex, cval);
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void OPT_1_HIST(int* lcm, int* hist, int n) {
//
int vertex = blockIdx.x;
int vcomp = threadIdx.x;
bool equal;
//
__shared__ int cval;
//
if(vcomp == 0)
cval = 0;
__syncthreads();
//
if(vertex < n && vcomp < n)
for(int i = vcomp; i < n; i += blockDim.x) {
if(vertex == i) {
atomicAdd(&cval, 1);
continue;
}
equal = false;
for(int j = 0; j < n; j++) {
if(lcm[vertex*n + j] == lcm[i*n + j])
equal = true;
else {
equal = false;
break;
}
}
if(equal)
atomicAdd(&cval, 1);
}
__syncthreads();
if(vertex < n && vcomp == 0 && cval > 0) {
atomicAdd(&hist[cval], 1);
//printf("\nv%d: %d\n", vertex, cval);
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10OPT_1_HISTPiS_i
.globl _Z10OPT_1_HISTPiS_i
.p2align 8
.type _Z10OPT_1_HISTPiS_i,@function
_Z10OPT_1_HISTPiS_i:
v_cmp_eq_u32_e64 s2, 0, v0
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
v_mov_b32_e32 v1, 0
ds_store_b32 v1, v1
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s3
s_load_b32 s10, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmp_gt_i32_e32 vcc_lo, s10, v0
s_cmp_lt_i32 s15, s10
s_cselect_b32 s3, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s4, s3, vcc_lo
s_and_saveexec_b32 s11, s4
s_cbranch_execz .LBB0_17
s_clause 0x1
s_load_b32 s8, s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x0
s_cmp_gt_i32 s10, 0
s_mul_i32 s6, s15, s10
v_mul_lo_u32 v1, v0, s10
s_cselect_b32 s13, -1, 0
s_ashr_i32 s7, s6, 31
v_mov_b32_e32 v4, 0
s_lshl_b64 s[6:7], s[6:7], 2
s_mov_b32 s12, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s14, s8, 0xffff
s_add_u32 s6, s4, s6
s_mul_i32 s16, s10, s14
s_addc_u32 s7, s5, s7
s_branch .LBB0_5
.LBB0_4:
s_or_b32 exec_lo, exec_lo, s8
v_add_nc_u32_e32 v0, s14, v0
v_add_nc_u32_e32 v1, s16, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_le_i32_e32 vcc_lo, s10, v0
s_or_b32 s12, vcc_lo, s12
s_and_not1_b32 exec_lo, exec_lo, s12
s_cbranch_execz .LBB0_17
.LBB0_5:
s_mov_b32 s9, -1
s_mov_b32 s17, exec_lo
v_cmpx_ne_u32_e64 s15, v0
s_cbranch_execz .LBB0_14
s_and_b32 vcc_lo, exec_lo, s13
s_cbranch_vccz .LBB0_12
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s18, 0
s_mov_b64 s[8:9], s[6:7]
s_mov_b32 s20, s10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_9
.p2align 6
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s23
s_xor_b32 s23, s21, -1
s_and_b32 s24, exec_lo, s22
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_or_b32 s18, s24, s18
s_and_not1_b32 s19, s19, exec_lo
s_and_b32 s23, s23, exec_lo
s_or_b32 s19, s19, s23
s_and_not1_b32 exec_lo, exec_lo, s18
s_cbranch_execz .LBB0_11
.LBB0_9:
global_load_b32 v5, v[2:3], off
s_load_b32 s23, s[8:9], 0x0
s_or_b32 s21, s21, exec_lo
s_or_b32 s22, s22, exec_lo
s_waitcnt vmcnt(0) lgkmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, s23, v5
s_and_saveexec_b32 s23, vcc_lo
s_cbranch_execz .LBB0_8
s_add_i32 s20, s20, -1
s_add_u32 s8, s8, 4
s_addc_u32 s9, s9, 0
s_cmp_eq_u32 s20, 0
v_add_co_u32 v2, vcc_lo, v2, 4
s_cselect_b32 s24, -1, 0
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
s_and_not1_b32 s22, s22, exec_lo
s_and_b32 s24, s24, exec_lo
s_and_not1_b32 s21, s21, exec_lo
s_or_b32 s22, s22, s24
s_branch .LBB0_8
.LBB0_11:
s_set_inst_prefetch_distance 0x2
s_or_b32 exec_lo, exec_lo, s18
s_branch .LBB0_13
.LBB0_12:
s_mov_b32 s19, 0
.LBB0_13:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_not1_b32 s9, s19, exec_lo
.LBB0_14:
s_or_b32 exec_lo, exec_lo, s17
s_and_saveexec_b32 s8, s9
s_cbranch_execz .LBB0_4
s_mov_b32 s9, exec_lo
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mbcnt_lo_u32_b32 v2, s9, 0
v_cmp_eq_u32_e32 vcc_lo, 0, v2
s_and_b32 s17, exec_lo, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_mov_b32 exec_lo, s17
s_cbranch_execz .LBB0_4
s_bcnt1_i32_b32 s9, s9
s_delay_alu instid0(SALU_CYCLE_1)
v_mov_b32_e32 v2, s9
ds_add_u32 v4, v2
s_branch .LBB0_4
.LBB0_17:
s_or_b32 exec_lo, exec_lo, s11
v_mov_b32_e32 v0, 0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_b32 s2, s2, s3
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
v_readfirstlane_b32 s4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_cmp_gt_i32 s4, 0
s_cselect_b32 s3, -1, 0
s_and_b32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_20
s_mov_b32 s2, exec_lo
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mbcnt_lo_u32_b32 v1, s2, 0
v_cmp_eq_u32_e32 vcc_lo, 0, v1
s_and_b32 s3, exec_lo, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_mov_b32 exec_lo, s3
s_cbranch_execz .LBB0_20
s_load_b64 s[0:1], s[0:1], 0x8
s_bcnt1_i32_b32 s2, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s2
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_atomic_add_u32 v[0:1], v2, off
.LBB0_20:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10OPT_1_HISTPiS_i
.amdhsa_group_segment_fixed_size 4
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 25
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10OPT_1_HISTPiS_i, .Lfunc_end0-_Z10OPT_1_HISTPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10OPT_1_HISTPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 27
.sgpr_spill_count: 0
.symbol: _Z10OPT_1_HISTPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void OPT_1_HIST(int* lcm, int* hist, int n) {
//
int vertex = blockIdx.x;
int vcomp = threadIdx.x;
bool equal;
//
__shared__ int cval;
//
if(vcomp == 0)
cval = 0;
__syncthreads();
//
if(vertex < n && vcomp < n)
for(int i = vcomp; i < n; i += blockDim.x) {
if(vertex == i) {
atomicAdd(&cval, 1);
continue;
}
equal = false;
for(int j = 0; j < n; j++) {
if(lcm[vertex*n + j] == lcm[i*n + j])
equal = true;
else {
equal = false;
break;
}
}
if(equal)
atomicAdd(&cval, 1);
}
__syncthreads();
if(vertex < n && vcomp == 0 && cval > 0) {
atomicAdd(&hist[cval], 1);
//printf("\nv%d: %d\n", vertex, cval);
}
} | .text
.file "OPT_1_HIST.hip"
.globl _Z25__device_stub__OPT_1_HISTPiS_i # -- Begin function _Z25__device_stub__OPT_1_HISTPiS_i
.p2align 4, 0x90
.type _Z25__device_stub__OPT_1_HISTPiS_i,@function
_Z25__device_stub__OPT_1_HISTPiS_i: # @_Z25__device_stub__OPT_1_HISTPiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10OPT_1_HISTPiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z25__device_stub__OPT_1_HISTPiS_i, .Lfunc_end0-_Z25__device_stub__OPT_1_HISTPiS_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10OPT_1_HISTPiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10OPT_1_HISTPiS_i,@object # @_Z10OPT_1_HISTPiS_i
.section .rodata,"a",@progbits
.globl _Z10OPT_1_HISTPiS_i
.p2align 3, 0x0
_Z10OPT_1_HISTPiS_i:
.quad _Z25__device_stub__OPT_1_HISTPiS_i
.size _Z10OPT_1_HISTPiS_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10OPT_1_HISTPiS_i"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__OPT_1_HISTPiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10OPT_1_HISTPiS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z10OPT_1_HISTPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R9, SR_CTAID.X ; /* 0x0000000000097919 */
/* 0x000e620000002500 */
/*0040*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x001fe20003f05270 */
/*0050*/ IMAD.MOV.U32 R0, RZ, RZ, R6 ; /* 0x000000ffff007224 */
/* 0x000fd800078e0006 */
/*0060*/ @!P0 STS [RZ], RZ ; /* 0x000000ffff008388 */
/* 0x000fe80000000800 */
/*0070*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0080*/ ISETP.GE.AND P0, PT, R9, c[0x0][0x170], PT ; /* 0x00005c0009007a0c */
/* 0x002fc80003f06270 */
/*0090*/ ISETP.GE.OR P1, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */
/* 0x000fda0000726670 */
/*00a0*/ @P1 BRA 0x320 ; /* 0x0000027000001947 */
/* 0x000fea0003800000 */
/*00b0*/ ISETP.LT.AND P1, PT, RZ, c[0x0][0x170], PT ; /* 0x00005c00ff007a0c */
/* 0x000fda0003f21270 */
/*00c0*/ @P1 BRA 0x170 ; /* 0x000000a000001947 */
/* 0x000fea0003800000 */
/*00d0*/ ISETP.NE.AND P1, PT, R9, R0, PT ; /* 0x000000000900720c */
/* 0x000fe20003f25270 */
/*00e0*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe20003800000 */
/*00f0*/ IADD3 R0, R0, c[0x0][0x0], RZ ; /* 0x0000000000007a10 */
/* 0x000fe20007ffe0ff */
/*0100*/ BSSY B0, 0x150 ; /* 0x0000004000007945 */
/* 0x000fe60003800000 */
/*0110*/ ISETP.GE.AND P2, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fce0003f46270 */
/*0120*/ @P1 BRA 0x140 ; /* 0x0000001000001947 */
/* 0x000fea0003800000 */
/*0130*/ ATOMS.POPC.INC.32 RZ, [URZ] ; /* 0x00000000ffff7f8c */
/* 0x000fe4000d00003f */
/*0140*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0150*/ @!P2 BRA 0xd0 ; /* 0xffffff700000a947 */
/* 0x000fea000383ffff */
/*0160*/ BRA 0x320 ; /* 0x000001b000007947 */
/* 0x000fea0003800000 */
/*0170*/ ISETP.NE.AND P1, PT, R9, R0, PT ; /* 0x000000000900720c */
/* 0x000fe20003f25270 */
/*0180*/ YIELD ; /* 0x0000000000007946 */
/* 0x000fe20003800000 */
/*0190*/ BSSY B0, 0x2f0 ; /* 0x0000015000007945 */
/* 0x000ff60003800000 */
/*01a0*/ @!P1 BRA 0x2d0 ; /* 0x0000012000009947 */
/* 0x000fea0003800000 */
/*01b0*/ BSSY B1, 0x2b0 ; /* 0x000000f000017945 */
/* 0x000fe20003800000 */
/*01c0*/ IMAD.MOV.U32 R7, RZ, RZ, RZ ; /* 0x000000ffff077224 */
/* 0x000fc400078e00ff */
/*01d0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe400078e00ff */
/*01e0*/ IMAD R2, R9, c[0x0][0x170], R7 ; /* 0x00005c0009027a24 */
/* 0x000fc400078e0207 */
/*01f0*/ IMAD R4, R0, c[0x0][0x170], R7 ; /* 0x00005c0000047a24 */
/* 0x000fe400078e0207 */
/*0200*/ IMAD.WIDE R2, R2, R5, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fc800078e0205 */
/*0210*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe400078e0205 */
/*0220*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*0230*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea4000c1e1900 */
/*0240*/ ISETP.NE.AND P1, PT, R3, R4, PT ; /* 0x000000040300720c */
/* 0x004fda0003f25270 */
/*0250*/ @P1 BREAK B1 ; /* 0x0000000000011942 */
/* 0x000fe20003800000 */
/*0260*/ @P1 BRA 0x2e0 ; /* 0x0000007000001947 */
/* 0x000fea0003800000 */
/*0270*/ IADD3 R7, R7, 0x1, RZ ; /* 0x0000000107077810 */
/* 0x000fc80007ffe0ff */
/*0280*/ ISETP.GE.AND P1, PT, R7, c[0x0][0x170], PT ; /* 0x00005c0007007a0c */
/* 0x000fda0003f26270 */
/*0290*/ @!P1 BRA 0x1d0 ; /* 0xffffff3000009947 */
/* 0x000fea000383ffff */
/*02a0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*02b0*/ ATOMS.POPC.INC.32 RZ, [URZ] ; /* 0x00000000ffff7f8c */
/* 0x000fe2000d00003f */
/*02c0*/ BRA 0x2e0 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*02d0*/ ATOMS.POPC.INC.32 RZ, [URZ] ; /* 0x00000000ffff7f8c */
/* 0x000fe4000d00003f */
/*02e0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*02f0*/ IADD3 R0, R0, c[0x0][0x0], RZ ; /* 0x0000000000007a10 */
/* 0x000fc80007ffe0ff */
/*0300*/ ISETP.GE.AND P1, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f26270 */
/*0310*/ @!P1 BRA 0x170 ; /* 0xfffffe5000009947 */
/* 0x000fea000383ffff */
/*0320*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe40003800000 */
/*0330*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0340*/ ISETP.NE.OR P0, PT, R6, RZ, P0 ; /* 0x000000ff0600720c */
/* 0x000fca0000705670 */
/*0350*/ LDS R2, [RZ] ; /* 0x00000000ff027984 */
/* 0x000e240000000800 */
/*0360*/ ISETP.LT.OR P0, PT, R2, 0x1, P0 ; /* 0x000000010200780c */
/* 0x001fda0000701670 */
/*0370*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0380*/ S2R R0, SR_LANEID ; /* 0x0000000000007919 */
/* 0x000e220000000000 */
/*0390*/ VOTEU.ANY UR6, UPT, PT ; /* 0x0000000000067886 */
/* 0x000fe200038e0100 */
/*03a0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*03b0*/ UFLO.U32 UR7, UR6 ; /* 0x00000006000772bd */
/* 0x000fe200080e0000 */
/*03c0*/ POPC R5, UR6 ; /* 0x0000000600057d09 */
/* 0x000e640008000000 */
/*03d0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fc600078e0203 */
/*03e0*/ ISETP.EQ.U32.AND P0, PT, R0, UR7, PT ; /* 0x0000000700007c0c */
/* 0x001fda000bf02070 */
/*03f0*/ @P0 RED.E.ADD.STRONG.GPU [R2.64], R5 ; /* 0x000000050200098e */
/* 0x002fe2000c10e184 */
/*0400*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0410*/ BRA 0x410; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0480*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0490*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10OPT_1_HISTPiS_i
.globl _Z10OPT_1_HISTPiS_i
.p2align 8
.type _Z10OPT_1_HISTPiS_i,@function
_Z10OPT_1_HISTPiS_i:
v_cmp_eq_u32_e64 s2, 0, v0
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
v_mov_b32_e32 v1, 0
ds_store_b32 v1, v1
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s3
s_load_b32 s10, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmp_gt_i32_e32 vcc_lo, s10, v0
s_cmp_lt_i32 s15, s10
s_cselect_b32 s3, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s4, s3, vcc_lo
s_and_saveexec_b32 s11, s4
s_cbranch_execz .LBB0_17
s_clause 0x1
s_load_b32 s8, s[0:1], 0x24
s_load_b64 s[4:5], s[0:1], 0x0
s_cmp_gt_i32 s10, 0
s_mul_i32 s6, s15, s10
v_mul_lo_u32 v1, v0, s10
s_cselect_b32 s13, -1, 0
s_ashr_i32 s7, s6, 31
v_mov_b32_e32 v4, 0
s_lshl_b64 s[6:7], s[6:7], 2
s_mov_b32 s12, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s14, s8, 0xffff
s_add_u32 s6, s4, s6
s_mul_i32 s16, s10, s14
s_addc_u32 s7, s5, s7
s_branch .LBB0_5
.LBB0_4:
s_or_b32 exec_lo, exec_lo, s8
v_add_nc_u32_e32 v0, s14, v0
v_add_nc_u32_e32 v1, s16, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_le_i32_e32 vcc_lo, s10, v0
s_or_b32 s12, vcc_lo, s12
s_and_not1_b32 exec_lo, exec_lo, s12
s_cbranch_execz .LBB0_17
.LBB0_5:
s_mov_b32 s9, -1
s_mov_b32 s17, exec_lo
v_cmpx_ne_u32_e64 s15, v0
s_cbranch_execz .LBB0_14
s_and_b32 vcc_lo, exec_lo, s13
s_cbranch_vccz .LBB0_12
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s18, 0
s_mov_b64 s[8:9], s[6:7]
s_mov_b32 s20, s10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_9
.p2align 6
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s23
s_xor_b32 s23, s21, -1
s_and_b32 s24, exec_lo, s22
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_or_b32 s18, s24, s18
s_and_not1_b32 s19, s19, exec_lo
s_and_b32 s23, s23, exec_lo
s_or_b32 s19, s19, s23
s_and_not1_b32 exec_lo, exec_lo, s18
s_cbranch_execz .LBB0_11
.LBB0_9:
global_load_b32 v5, v[2:3], off
s_load_b32 s23, s[8:9], 0x0
s_or_b32 s21, s21, exec_lo
s_or_b32 s22, s22, exec_lo
s_waitcnt vmcnt(0) lgkmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, s23, v5
s_and_saveexec_b32 s23, vcc_lo
s_cbranch_execz .LBB0_8
s_add_i32 s20, s20, -1
s_add_u32 s8, s8, 4
s_addc_u32 s9, s9, 0
s_cmp_eq_u32 s20, 0
v_add_co_u32 v2, vcc_lo, v2, 4
s_cselect_b32 s24, -1, 0
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
s_and_not1_b32 s22, s22, exec_lo
s_and_b32 s24, s24, exec_lo
s_and_not1_b32 s21, s21, exec_lo
s_or_b32 s22, s22, s24
s_branch .LBB0_8
.LBB0_11:
s_set_inst_prefetch_distance 0x2
s_or_b32 exec_lo, exec_lo, s18
s_branch .LBB0_13
.LBB0_12:
s_mov_b32 s19, 0
.LBB0_13:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_not1_b32 s9, s19, exec_lo
.LBB0_14:
s_or_b32 exec_lo, exec_lo, s17
s_and_saveexec_b32 s8, s9
s_cbranch_execz .LBB0_4
s_mov_b32 s9, exec_lo
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mbcnt_lo_u32_b32 v2, s9, 0
v_cmp_eq_u32_e32 vcc_lo, 0, v2
s_and_b32 s17, exec_lo, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_mov_b32 exec_lo, s17
s_cbranch_execz .LBB0_4
s_bcnt1_i32_b32 s9, s9
s_delay_alu instid0(SALU_CYCLE_1)
v_mov_b32_e32 v2, s9
ds_add_u32 v4, v2
s_branch .LBB0_4
.LBB0_17:
s_or_b32 exec_lo, exec_lo, s11
v_mov_b32_e32 v0, 0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_b32 s2, s2, s3
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
v_readfirstlane_b32 s4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
s_cmp_gt_i32 s4, 0
s_cselect_b32 s3, -1, 0
s_and_b32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_20
s_mov_b32 s2, exec_lo
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mbcnt_lo_u32_b32 v1, s2, 0
v_cmp_eq_u32_e32 vcc_lo, 0, v1
s_and_b32 s3, exec_lo, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_mov_b32 exec_lo, s3
s_cbranch_execz .LBB0_20
s_load_b64 s[0:1], s[0:1], 0x8
s_bcnt1_i32_b32 s2, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s2
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_atomic_add_u32 v[0:1], v2, off
.LBB0_20:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10OPT_1_HISTPiS_i
.amdhsa_group_segment_fixed_size 4
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 25
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10OPT_1_HISTPiS_i, .Lfunc_end0-_Z10OPT_1_HISTPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10OPT_1_HISTPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 27
.sgpr_spill_count: 0
.symbol: _Z10OPT_1_HISTPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0012104b_00000000-6_OPT_1_HIST.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z10OPT_1_HISTPiS_iPiS_i
.type _Z33__device_stub__Z10OPT_1_HISTPiS_iPiS_i, @function
_Z33__device_stub__Z10OPT_1_HISTPiS_iPiS_i:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10OPT_1_HISTPiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z33__device_stub__Z10OPT_1_HISTPiS_iPiS_i, .-_Z33__device_stub__Z10OPT_1_HISTPiS_iPiS_i
.globl _Z10OPT_1_HISTPiS_i
.type _Z10OPT_1_HISTPiS_i, @function
_Z10OPT_1_HISTPiS_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z10OPT_1_HISTPiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z10OPT_1_HISTPiS_i, .-_Z10OPT_1_HISTPiS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z10OPT_1_HISTPiS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z10OPT_1_HISTPiS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "OPT_1_HIST.hip"
.globl _Z25__device_stub__OPT_1_HISTPiS_i # -- Begin function _Z25__device_stub__OPT_1_HISTPiS_i
.p2align 4, 0x90
.type _Z25__device_stub__OPT_1_HISTPiS_i,@function
_Z25__device_stub__OPT_1_HISTPiS_i: # @_Z25__device_stub__OPT_1_HISTPiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10OPT_1_HISTPiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z25__device_stub__OPT_1_HISTPiS_i, .Lfunc_end0-_Z25__device_stub__OPT_1_HISTPiS_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10OPT_1_HISTPiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z10OPT_1_HISTPiS_i,@object # @_Z10OPT_1_HISTPiS_i
.section .rodata,"a",@progbits
.globl _Z10OPT_1_HISTPiS_i
.p2align 3, 0x0
_Z10OPT_1_HISTPiS_i:
.quad _Z25__device_stub__OPT_1_HISTPiS_i
.size _Z10OPT_1_HISTPiS_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10OPT_1_HISTPiS_i"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__OPT_1_HISTPiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z10OPT_1_HISTPiS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
__device__ int d_value;
// Device code: GPU function
__global__ void test_Kernel()
{
int threadID = threadIdx.x;
d_value = 1;
printf("threadID %-3d d_value%3d\n", threadID, d_value);
}
// Host code: CPU function
int main()
{
int h_value = 0;
// kernelName<<<#block_per_grid, #thread_per_block, shared_size, s>>>(param1, ...);
test_Kernel<<<1, 2>>>();
cudaMemcpyFromSymbol(&h_value, d_value, sizeof(int), 0, cudaMemcpyDeviceToHost);
printf("Output from host: %d\n",h_value);
return 0;
} | code for sm_80
Function : _Z11test_Kernelv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fc800078e00ff */
/*0010*/ S2R R10, SR_TID.X ; /* 0x00000000000a7919 */
/* 0x000e220000002100 */
/*0020*/ IADD3 R1, R1, -0x8, RZ ; /* 0xfffffff801017810 */
/* 0x000fe20007ffe0ff */
/*0030*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff027624 */
/* 0x000fe200078e00ff */
/*0040*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe20000000f00 */
/*0050*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff037624 */
/* 0x000fe200078e00ff */
/*0060*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0070*/ IMAD.MOV.U32 R11, RZ, RZ, 0x1 ; /* 0x00000001ff0b7424 */
/* 0x000fe200078e00ff */
/*0080*/ LDC.64 R8, c[0x4][R0] ; /* 0x0100000000087b82 */
/* 0x0002a20000000a00 */
/*0090*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */
/* 0x000fe20007f1e0ff */
/*00a0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x10] ; /* 0x01000400ff047624 */
/* 0x000fe400078e00ff */
/*00b0*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x0003e2000c101904 */
/*00c0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x14] ; /* 0x01000500ff057624 */
/* 0x000fc400078e00ff */
/*00d0*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */
/* 0x000fe200000e06ff */
/*00e0*/ STL.64 [R1], R10 ; /* 0x0000000a01007387 */
/* 0x0013e80000100a00 */
/*00f0*/ LEPC R2 ; /* 0x000000000002734e */
/* 0x002fe40000000000 */
/*0100*/ MOV R11, 0x170 ; /* 0x00000170000b7802 */
/* 0x000fe40000000f00 */
/*0110*/ MOV R20, 0xf0 ; /* 0x000000f000147802 */
/* 0x000fe40000000f00 */
/*0120*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*0130*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*0140*/ IADD3 R20, P0, P1, -R20, R11, R2 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e102 */
/*0150*/ IADD3.X R21, ~R0, R21, R3, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2503 */
/*0160*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */
/* 0x004fea0003c00000 */
/*0170*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0180*/ BRA 0x180; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
__device__ int d_value;
// Device code: GPU function
__global__ void test_Kernel()
{
int threadID = threadIdx.x;
d_value = 1;
printf("threadID %-3d d_value%3d\n", threadID, d_value);
}
// Host code: CPU function
int main()
{
int h_value = 0;
// kernelName<<<#block_per_grid, #thread_per_block, shared_size, s>>>(param1, ...);
test_Kernel<<<1, 2>>>();
cudaMemcpyFromSymbol(&h_value, d_value, sizeof(int), 0, cudaMemcpyDeviceToHost);
printf("Output from host: %d\n",h_value);
return 0;
} | .file "tmpxft_00061ccf_00000000-6_deviceVariable.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z11test_Kernelvv
.type _Z30__device_stub__Z11test_Kernelvv, @function
_Z30__device_stub__Z11test_Kernelvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z11test_Kernelv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z30__device_stub__Z11test_Kernelvv, .-_Z30__device_stub__Z11test_Kernelvv
.globl _Z11test_Kernelv
.type _Z11test_Kernelv, @function
_Z11test_Kernelv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z11test_Kernelvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z11test_Kernelv, .-_Z11test_Kernelv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Output from host: %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $0, 12(%rsp)
movl $2, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
leaq 12(%rsp), %rdi
movl $2, %r8d
movl $0, %ecx
movl $4, %edx
leaq _ZL7d_value(%rip), %rsi
call cudaMemcpyFromSymbol@PLT
movl 12(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L16
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
call _Z30__device_stub__Z11test_Kernelvv
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z11test_Kernelv"
.LC2:
.string "d_value"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z11test_Kernelv(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _ZL7d_value(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL7d_value
.comm _ZL7d_value,4,4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
__device__ int d_value;
// Device code: GPU function
__global__ void test_Kernel()
{
int threadID = threadIdx.x;
d_value = 1;
printf("threadID %-3d d_value%3d\n", threadID, d_value);
}
// Host code: CPU function
int main()
{
int h_value = 0;
// kernelName<<<#block_per_grid, #thread_per_block, shared_size, s>>>(param1, ...);
test_Kernel<<<1, 2>>>();
cudaMemcpyFromSymbol(&h_value, d_value, sizeof(int), 0, cudaMemcpyDeviceToHost);
printf("Output from host: %d\n",h_value);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__device__ int d_value;
// Device code: GPU function
__global__ void test_Kernel()
{
int threadID = threadIdx.x;
d_value = 1;
printf("threadID %-3d d_value%3d\n", threadID, d_value);
}
// Host code: CPU function
int main()
{
int h_value = 0;
// kernelName<<<#block_per_grid, #thread_per_block, shared_size, s>>>(param1, ...);
test_Kernel<<<1, 2>>>();
hipMemcpyFromSymbol(&h_value, HIP_SYMBOL(d_value), sizeof(int), 0, hipMemcpyDeviceToHost);
printf("Output from host: %d\n",h_value);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__device__ int d_value;
// Device code: GPU function
__global__ void test_Kernel()
{
int threadID = threadIdx.x;
d_value = 1;
printf("threadID %-3d d_value%3d\n", threadID, d_value);
}
// Host code: CPU function
int main()
{
int h_value = 0;
// kernelName<<<#block_per_grid, #thread_per_block, shared_size, s>>>(param1, ...);
test_Kernel<<<1, 2>>>();
hipMemcpyFromSymbol(&h_value, HIP_SYMBOL(d_value), sizeof(int), 0, hipMemcpyDeviceToHost);
printf("Output from host: %d\n",h_value);
return 0;
} | .text
.file "deviceVariable.hip"
.globl _Z26__device_stub__test_Kernelv # -- Begin function _Z26__device_stub__test_Kernelv
.p2align 4, 0x90
.type _Z26__device_stub__test_Kernelv,@function
_Z26__device_stub__test_Kernelv: # @_Z26__device_stub__test_Kernelv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z11test_Kernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z26__device_stub__test_Kernelv, .Lfunc_end0-_Z26__device_stub__test_Kernelv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movl $0, 12(%rsp)
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 1(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z11test_Kernelv, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
leaq 12(%rsp), %rdi
movl $d_value, %esi
movl $4, %edx
xorl %ecx, %ecx
movl $2, %r8d
callq hipMemcpyFromSymbol
movl 12(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
xorl %eax, %eax
addq $72, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11test_Kernelv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $0, 8(%rsp)
movl $0, (%rsp)
movl $d_value, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
callq __hipRegisterVar
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type d_value,@object # @d_value
.local d_value
.comm d_value,4,4
.type _Z11test_Kernelv,@object # @_Z11test_Kernelv
.section .rodata,"a",@progbits
.globl _Z11test_Kernelv
.p2align 3, 0x0
_Z11test_Kernelv:
.quad _Z26__device_stub__test_Kernelv
.size _Z11test_Kernelv, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Output from host: %d\n"
.size .L.str, 22
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11test_Kernelv"
.size .L__unnamed_1, 17
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "d_value"
.size .L__unnamed_2, 8
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__test_Kernelv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym d_value
.addrsig_sym _Z11test_Kernelv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00061ccf_00000000-6_deviceVariable.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z11test_Kernelvv
.type _Z30__device_stub__Z11test_Kernelvv, @function
_Z30__device_stub__Z11test_Kernelvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z11test_Kernelv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z30__device_stub__Z11test_Kernelvv, .-_Z30__device_stub__Z11test_Kernelvv
.globl _Z11test_Kernelv
.type _Z11test_Kernelv, @function
_Z11test_Kernelv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z11test_Kernelvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z11test_Kernelv, .-_Z11test_Kernelv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Output from host: %d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $0, 12(%rsp)
movl $2, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
leaq 12(%rsp), %rdi
movl $2, %r8d
movl $0, %ecx
movl $4, %edx
leaq _ZL7d_value(%rip), %rsi
call cudaMemcpyFromSymbol@PLT
movl 12(%rsp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L16
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
call _Z30__device_stub__Z11test_Kernelvv
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z11test_Kernelv"
.LC2:
.string "d_value"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z11test_Kernelv(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _ZL7d_value(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL7d_value
.comm _ZL7d_value,4,4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "deviceVariable.hip"
.globl _Z26__device_stub__test_Kernelv # -- Begin function _Z26__device_stub__test_Kernelv
.p2align 4, 0x90
.type _Z26__device_stub__test_Kernelv,@function
_Z26__device_stub__test_Kernelv: # @_Z26__device_stub__test_Kernelv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z11test_Kernelv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z26__device_stub__test_Kernelv, .Lfunc_end0-_Z26__device_stub__test_Kernelv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movl $0, 12(%rsp)
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 1(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z11test_Kernelv, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
leaq 12(%rsp), %rdi
movl $d_value, %esi
movl $4, %edx
xorl %ecx, %ecx
movl $2, %r8d
callq hipMemcpyFromSymbol
movl 12(%rsp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
xorl %eax, %eax
addq $72, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11test_Kernelv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $0, 8(%rsp)
movl $0, (%rsp)
movl $d_value, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
callq __hipRegisterVar
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type d_value,@object # @d_value
.local d_value
.comm d_value,4,4
.type _Z11test_Kernelv,@object # @_Z11test_Kernelv
.section .rodata,"a",@progbits
.globl _Z11test_Kernelv
.p2align 3, 0x0
_Z11test_Kernelv:
.quad _Z26__device_stub__test_Kernelv
.size _Z11test_Kernelv, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Output from host: %d\n"
.size .L.str, 22
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11test_Kernelv"
.size .L__unnamed_1, 17
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "d_value"
.size .L__unnamed_2, 8
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__test_Kernelv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym d_value
.addrsig_sym _Z11test_Kernelv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /** @file processMandelbrotElement.cu
*
* Copyright 2010 The Mathworks, Inc.
* $Revision: 1$
* $Date: 2010-11-08$
*/
/** Work out which piece of the global array this thread should operate on */
__device__ size_t calculateGlobalIndex() {
// Which block are we?
size_t const globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
// Which thread are we within the block?
size_t const localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
// How big is each block?
size_t const threadsPerBlock = blockDim.x*blockDim.y;
// Which thread are we overall?
return localThreadIdx + globalBlockIndex*threadsPerBlock;
}
/** The actual Mandelbrot algorithm for a single location */
__device__ double doIterations( double const realPart0,
double const imagPart0,
double const escapeRadius,
unsigned int const maxIters ) {
// Initialise: z = z0
double const escapeRadius2 = escapeRadius*escapeRadius;
double realPart = realPart0;
double imagPart = imagPart0;
unsigned int count = 0;
// Loop until escape
while ( ( count <= maxIters )
&& ((realPart*realPart + imagPart*imagPart) <= escapeRadius2) ) {
++count;
// Update: z = z*z + z0;
double const oldRealPart = realPart;
realPart = realPart*realPart - imagPart*imagPart + realPart0;
imagPart = 2.0*oldRealPart*imagPart + imagPart0;
}
// Correct final position for smooth shading
double const absZ2 = ( realPart*realPart + imagPart*imagPart );
if (absZ2<escapeRadius2) {
return double(count) + 1.0 - log( log( escapeRadius2 ) / 2.0 ) / log(2.0);
} else {
return double(count) + 1.0 - log( log( absZ2 ) / 2.0 ) / log(2.0);
}
}
/** Main entry point.
* Works out where the current thread should read/write to global memory
* and calls doIterations to do the actual work.
*/
__global__ void processMandelbrotElement(
double * out,
const double * x,
const double * y,
const double escapeRadius,
const unsigned int maxIters,
const unsigned int numel ) {
// Work out which thread we are
size_t const globalThreadIdx = calculateGlobalIndex();
// If we're off the end, return now
if (globalThreadIdx >= numel) {
return;
}
// Get our X and Y coords
double const realPart0 = x[globalThreadIdx];
double const imagPart0 = y[globalThreadIdx];
// Run the itearations on this location
double const count = doIterations( realPart0, imagPart0, escapeRadius, maxIters );
out[globalThreadIdx] = log( count );
} | .file "tmpxft_0004521d_00000000-6_mandelbrotViewerProcessElement.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2031:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2031:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z20calculateGlobalIndexv
.type _Z20calculateGlobalIndexv, @function
_Z20calculateGlobalIndexv:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z20calculateGlobalIndexv, .-_Z20calculateGlobalIndexv
.globl _Z12doIterationsdddj
.type _Z12doIterationsdddj, @function
_Z12doIterationsdddj:
.LFB2028:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2028:
.size _Z12doIterationsdddj, .-_Z12doIterationsdddj
.globl _Z53__device_stub__Z24processMandelbrotElementPdPKdS1_djjPdPKdS1_djj
.type _Z53__device_stub__Z24processMandelbrotElementPdPKdS1_djjPdPKdS1_djj, @function
_Z53__device_stub__Z24processMandelbrotElementPdPKdS1_djjPdPKdS1_djj:
.LFB2053:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movsd %xmm0, 16(%rsp)
movl %ecx, 12(%rsp)
movl %r8d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z24processMandelbrotElementPdPKdS1_djj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z53__device_stub__Z24processMandelbrotElementPdPKdS1_djjPdPKdS1_djj, .-_Z53__device_stub__Z24processMandelbrotElementPdPKdS1_djjPdPKdS1_djj
.globl _Z24processMandelbrotElementPdPKdS1_djj
.type _Z24processMandelbrotElementPdPKdS1_djj, @function
_Z24processMandelbrotElementPdPKdS1_djj:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z53__device_stub__Z24processMandelbrotElementPdPKdS1_djjPdPKdS1_djj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z24processMandelbrotElementPdPKdS1_djj, .-_Z24processMandelbrotElementPdPKdS1_djj
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z24processMandelbrotElementPdPKdS1_djj"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z24processMandelbrotElementPdPKdS1_djj(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /** @file processMandelbrotElement.cu
*
* Copyright 2010 The Mathworks, Inc.
* $Revision: 1$
* $Date: 2010-11-08$
*/
/** Work out which piece of the global array this thread should operate on */
__device__ size_t calculateGlobalIndex() {
// Which block are we?
size_t const globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
// Which thread are we within the block?
size_t const localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
// How big is each block?
size_t const threadsPerBlock = blockDim.x*blockDim.y;
// Which thread are we overall?
return localThreadIdx + globalBlockIndex*threadsPerBlock;
}
/** The actual Mandelbrot algorithm for a single location */
__device__ double doIterations( double const realPart0,
double const imagPart0,
double const escapeRadius,
unsigned int const maxIters ) {
// Initialise: z = z0
double const escapeRadius2 = escapeRadius*escapeRadius;
double realPart = realPart0;
double imagPart = imagPart0;
unsigned int count = 0;
// Loop until escape
while ( ( count <= maxIters )
&& ((realPart*realPart + imagPart*imagPart) <= escapeRadius2) ) {
++count;
// Update: z = z*z + z0;
double const oldRealPart = realPart;
realPart = realPart*realPart - imagPart*imagPart + realPart0;
imagPart = 2.0*oldRealPart*imagPart + imagPart0;
}
// Correct final position for smooth shading
double const absZ2 = ( realPart*realPart + imagPart*imagPart );
if (absZ2<escapeRadius2) {
return double(count) + 1.0 - log( log( escapeRadius2 ) / 2.0 ) / log(2.0);
} else {
return double(count) + 1.0 - log( log( absZ2 ) / 2.0 ) / log(2.0);
}
}
/** Main entry point.
* Works out where the current thread should read/write to global memory
* and calls doIterations to do the actual work.
*/
__global__ void processMandelbrotElement(
double * out,
const double * x,
const double * y,
const double escapeRadius,
const unsigned int maxIters,
const unsigned int numel ) {
// Work out which thread we are
size_t const globalThreadIdx = calculateGlobalIndex();
// If we're off the end, return now
if (globalThreadIdx >= numel) {
return;
}
// Get our X and Y coords
double const realPart0 = x[globalThreadIdx];
double const imagPart0 = y[globalThreadIdx];
// Run the itearations on this location
double const count = doIterations( realPart0, imagPart0, escapeRadius, maxIters );
out[globalThreadIdx] = log( count );
} | #include <hip/hip_runtime.h>
/** @file processMandelbrotElement.cu
*
* Copyright 2010 The Mathworks, Inc.
* $Revision: 1$
* $Date: 2010-11-08$
*/
/** Work out which piece of the global array this thread should operate on */
__device__ size_t calculateGlobalIndex() {
// Which block are we?
size_t const globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
// Which thread are we within the block?
size_t const localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
// How big is each block?
size_t const threadsPerBlock = blockDim.x*blockDim.y;
// Which thread are we overall?
return localThreadIdx + globalBlockIndex*threadsPerBlock;
}
/** The actual Mandelbrot algorithm for a single location */
__device__ double doIterations( double const realPart0,
double const imagPart0,
double const escapeRadius,
unsigned int const maxIters ) {
// Initialise: z = z0
double const escapeRadius2 = escapeRadius*escapeRadius;
double realPart = realPart0;
double imagPart = imagPart0;
unsigned int count = 0;
// Loop until escape
while ( ( count <= maxIters )
&& ((realPart*realPart + imagPart*imagPart) <= escapeRadius2) ) {
++count;
// Update: z = z*z + z0;
double const oldRealPart = realPart;
realPart = realPart*realPart - imagPart*imagPart + realPart0;
imagPart = 2.0*oldRealPart*imagPart + imagPart0;
}
// Correct final position for smooth shading
double const absZ2 = ( realPart*realPart + imagPart*imagPart );
if (absZ2<escapeRadius2) {
return double(count) + 1.0 - log( log( escapeRadius2 ) / 2.0 ) / log(2.0);
} else {
return double(count) + 1.0 - log( log( absZ2 ) / 2.0 ) / log(2.0);
}
}
/** Main entry point.
* Works out where the current thread should read/write to global memory
* and calls doIterations to do the actual work.
*/
__global__ void processMandelbrotElement(
double * out,
const double * x,
const double * y,
const double escapeRadius,
const unsigned int maxIters,
const unsigned int numel ) {
// Work out which thread we are
size_t const globalThreadIdx = calculateGlobalIndex();
// If we're off the end, return now
if (globalThreadIdx >= numel) {
return;
}
// Get our X and Y coords
double const realPart0 = x[globalThreadIdx];
double const imagPart0 = y[globalThreadIdx];
// Run the itearations on this location
double const count = doIterations( realPart0, imagPart0, escapeRadius, maxIters );
out[globalThreadIdx] = log( count );
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
/** @file processMandelbrotElement.cu
*
* Copyright 2010 The Mathworks, Inc.
* $Revision: 1$
* $Date: 2010-11-08$
*/
/** Work out which piece of the global array this thread should operate on */
__device__ size_t calculateGlobalIndex() {
// Which block are we?
size_t const globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
// Which thread are we within the block?
size_t const localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
// How big is each block?
size_t const threadsPerBlock = blockDim.x*blockDim.y;
// Which thread are we overall?
return localThreadIdx + globalBlockIndex*threadsPerBlock;
}
/** The actual Mandelbrot algorithm for a single location */
__device__ double doIterations( double const realPart0,
double const imagPart0,
double const escapeRadius,
unsigned int const maxIters ) {
// Initialise: z = z0
double const escapeRadius2 = escapeRadius*escapeRadius;
double realPart = realPart0;
double imagPart = imagPart0;
unsigned int count = 0;
// Loop until escape
while ( ( count <= maxIters )
&& ((realPart*realPart + imagPart*imagPart) <= escapeRadius2) ) {
++count;
// Update: z = z*z + z0;
double const oldRealPart = realPart;
realPart = realPart*realPart - imagPart*imagPart + realPart0;
imagPart = 2.0*oldRealPart*imagPart + imagPart0;
}
// Correct final position for smooth shading
double const absZ2 = ( realPart*realPart + imagPart*imagPart );
if (absZ2<escapeRadius2) {
return double(count) + 1.0 - log( log( escapeRadius2 ) / 2.0 ) / log(2.0);
} else {
return double(count) + 1.0 - log( log( absZ2 ) / 2.0 ) / log(2.0);
}
}
/** Main entry point.
* Works out where the current thread should read/write to global memory
* and calls doIterations to do the actual work.
*/
__global__ void processMandelbrotElement(
double * out,
const double * x,
const double * y,
const double escapeRadius,
const unsigned int maxIters,
const unsigned int numel ) {
// Work out which thread we are
size_t const globalThreadIdx = calculateGlobalIndex();
// If we're off the end, return now
if (globalThreadIdx >= numel) {
return;
}
// Get our X and Y coords
double const realPart0 = x[globalThreadIdx];
double const imagPart0 = y[globalThreadIdx];
// Run the itearations on this location
double const count = doIterations( realPart0, imagPart0, escapeRadius, maxIters );
out[globalThreadIdx] = log( count );
} | .text
.file "mandelbrotViewerProcessElement.hip"
.globl _Z39__device_stub__processMandelbrotElementPdPKdS1_djj # -- Begin function _Z39__device_stub__processMandelbrotElementPdPKdS1_djj
.p2align 4, 0x90
.type _Z39__device_stub__processMandelbrotElementPdPKdS1_djj,@function
_Z39__device_stub__processMandelbrotElementPdPKdS1_djj: # @_Z39__device_stub__processMandelbrotElementPdPKdS1_djj
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movsd %xmm0, 64(%rsp)
movl %ecx, 12(%rsp)
movl %r8d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z24processMandelbrotElementPdPKdS1_djj, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z39__device_stub__processMandelbrotElementPdPKdS1_djj, .Lfunc_end0-_Z39__device_stub__processMandelbrotElementPdPKdS1_djj
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z24processMandelbrotElementPdPKdS1_djj, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z24processMandelbrotElementPdPKdS1_djj,@object # @_Z24processMandelbrotElementPdPKdS1_djj
.section .rodata,"a",@progbits
.globl _Z24processMandelbrotElementPdPKdS1_djj
.p2align 3, 0x0
_Z24processMandelbrotElementPdPKdS1_djj:
.quad _Z39__device_stub__processMandelbrotElementPdPKdS1_djj
.size _Z24processMandelbrotElementPdPKdS1_djj, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z24processMandelbrotElementPdPKdS1_djj"
.size .L__unnamed_1, 40
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z39__device_stub__processMandelbrotElementPdPKdS1_djj
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z24processMandelbrotElementPdPKdS1_djj
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0004521d_00000000-6_mandelbrotViewerProcessElement.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2031:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2031:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z20calculateGlobalIndexv
.type _Z20calculateGlobalIndexv, @function
_Z20calculateGlobalIndexv:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z20calculateGlobalIndexv, .-_Z20calculateGlobalIndexv
.globl _Z12doIterationsdddj
.type _Z12doIterationsdddj, @function
_Z12doIterationsdddj:
.LFB2028:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2028:
.size _Z12doIterationsdddj, .-_Z12doIterationsdddj
.globl _Z53__device_stub__Z24processMandelbrotElementPdPKdS1_djjPdPKdS1_djj
.type _Z53__device_stub__Z24processMandelbrotElementPdPKdS1_djjPdPKdS1_djj, @function
_Z53__device_stub__Z24processMandelbrotElementPdPKdS1_djjPdPKdS1_djj:
.LFB2053:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movsd %xmm0, 16(%rsp)
movl %ecx, 12(%rsp)
movl %r8d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z24processMandelbrotElementPdPKdS1_djj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z53__device_stub__Z24processMandelbrotElementPdPKdS1_djjPdPKdS1_djj, .-_Z53__device_stub__Z24processMandelbrotElementPdPKdS1_djjPdPKdS1_djj
.globl _Z24processMandelbrotElementPdPKdS1_djj
.type _Z24processMandelbrotElementPdPKdS1_djj, @function
_Z24processMandelbrotElementPdPKdS1_djj:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z53__device_stub__Z24processMandelbrotElementPdPKdS1_djjPdPKdS1_djj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z24processMandelbrotElementPdPKdS1_djj, .-_Z24processMandelbrotElementPdPKdS1_djj
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z24processMandelbrotElementPdPKdS1_djj"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z24processMandelbrotElementPdPKdS1_djj(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "mandelbrotViewerProcessElement.hip"
.globl _Z39__device_stub__processMandelbrotElementPdPKdS1_djj # -- Begin function _Z39__device_stub__processMandelbrotElementPdPKdS1_djj
.p2align 4, 0x90
.type _Z39__device_stub__processMandelbrotElementPdPKdS1_djj,@function
_Z39__device_stub__processMandelbrotElementPdPKdS1_djj: # @_Z39__device_stub__processMandelbrotElementPdPKdS1_djj
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movsd %xmm0, 64(%rsp)
movl %ecx, 12(%rsp)
movl %r8d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z24processMandelbrotElementPdPKdS1_djj, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z39__device_stub__processMandelbrotElementPdPKdS1_djj, .Lfunc_end0-_Z39__device_stub__processMandelbrotElementPdPKdS1_djj
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z24processMandelbrotElementPdPKdS1_djj, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z24processMandelbrotElementPdPKdS1_djj,@object # @_Z24processMandelbrotElementPdPKdS1_djj
.section .rodata,"a",@progbits
.globl _Z24processMandelbrotElementPdPKdS1_djj
.p2align 3, 0x0
_Z24processMandelbrotElementPdPKdS1_djj:
.quad _Z39__device_stub__processMandelbrotElementPdPKdS1_djj
.size _Z24processMandelbrotElementPdPKdS1_djj, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z24processMandelbrotElementPdPKdS1_djj"
.size .L__unnamed_1, 40
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z39__device_stub__processMandelbrotElementPdPKdS1_djj
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z24processMandelbrotElementPdPKdS1_djj
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <stdio.h>
__device__ float reference(float x)
{
double y = x;
return y * tanh(log1p(exp(y)));
}
__device__ float mish_final(float value)
{
auto e = __expf(value);
auto n = e * e + 2 * e;
if (value <= -0.6f)
return value * __fdividef(n, n + 2);
return value - 2 * __fdividef(value, n + 2);
}
__device__ half mish_half_old(half value)
{
return value * half(tanhf(hlog(half(1) + hexp(value))));
}
__device__ half mish_half_final(half value)
{
if (value > half(3.999))
return value;
auto e = hexp(value);
auto n = e * e + half(2) * e;
return value * n / (n + half(2));
}
__global__ void test()
{
for (float x = 0; x < 6; x += 0.0001)
{
// double precision reference
float ref = reference(x);
half h = x;
float expr1 = [=] {
return h * half(tanhf(hlog(half(1.0f) + hexp(h))));
} ();
auto e = hexp(h);
auto n = e * e + half(2) * e;
float expr2 = h * n / (n + half(2));
float expr3 = x; // h - half(2) * h / (n + half(2));
double err1 = abs(double(ref) - double(expr1));
double err2 = abs(double(ref) - double(expr2));
double err3 = abs(double(ref) - double(expr3));
int temp;
printf("[x=%f] %.7e %.7e %.7e %.7e (%.7e, %.7e, %.7e, %.7e)\n",
x, ref, expr1, expr2, expr3,
//frexpf(ref, &temp), frexpf(expr1, &temp), frexpf(expr2, &temp), frexpf(expr3, &temp),
0.0f, float(err1), float(err2), float(err3));
}
}
__global__ void test_final()
{
for (float x = -100; x < 100; x += 0.1)
{
float ref = reference(x);
float expr = mish_half_final(x);
printf("[x=%f] %.7e %.7e (err=%.8e)\n", x, ref, expr, abs(expr - ref));
}
}
__global__ void dump()
{
for (float x = -20; x < 50; x += 0.0001)
printf("%.7f %.7e\n", x, static_cast<float>(mish_half_final(x)));
}
int main ()
{
dump<<<1, 1>>>();
cudaDeviceSynchronize();
return 0;
} | .file "tmpxft_000c75d6_00000000-6_mish_design_half.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2438:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2438:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9referencef
.type _Z9referencef, @function
_Z9referencef:
.LFB2431:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2431:
.size _Z9referencef, .-_Z9referencef
.globl _Z10mish_finalf
.type _Z10mish_finalf, @function
_Z10mish_finalf:
.LFB2432:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2432:
.size _Z10mish_finalf, .-_Z10mish_finalf
.globl _Z13mish_half_old6__half
.type _Z13mish_half_old6__half, @function
_Z13mish_half_old6__half:
.LFB2433:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2433:
.size _Z13mish_half_old6__half, .-_Z13mish_half_old6__half
.globl _Z15mish_half_final6__half
.type _Z15mish_half_final6__half, @function
_Z15mish_half_final6__half:
.LFB2434:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2434:
.size _Z15mish_half_final6__half, .-_Z15mish_half_final6__half
.globl _Z22__device_stub__Z4testvv
.type _Z22__device_stub__Z4testvv, @function
_Z22__device_stub__Z4testvv:
.LFB2460:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z4testv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2460:
.size _Z22__device_stub__Z4testvv, .-_Z22__device_stub__Z4testvv
.globl _Z4testv
.type _Z4testv, @function
_Z4testv:
.LFB2461:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z22__device_stub__Z4testvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2461:
.size _Z4testv, .-_Z4testv
.globl _Z29__device_stub__Z10test_finalvv
.type _Z29__device_stub__Z10test_finalvv, @function
_Z29__device_stub__Z10test_finalvv:
.LFB2462:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z10test_finalv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2462:
.size _Z29__device_stub__Z10test_finalvv, .-_Z29__device_stub__Z10test_finalvv
.globl _Z10test_finalv
.type _Z10test_finalv, @function
_Z10test_finalv:
.LFB2463:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z10test_finalvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2463:
.size _Z10test_finalv, .-_Z10test_finalv
.globl _Z22__device_stub__Z4dumpvv
.type _Z22__device_stub__Z4dumpvv, @function
_Z22__device_stub__Z4dumpvv:
.LFB2464:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z4dumpv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2464:
.size _Z22__device_stub__Z4dumpvv, .-_Z22__device_stub__Z4dumpvv
.globl _Z4dumpv
.type _Z4dumpv, @function
_Z4dumpv:
.LFB2465:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z22__device_stub__Z4dumpvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2465:
.size _Z4dumpv, .-_Z4dumpv
.globl main
.type main, @function
main:
.LFB2435:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L38
.L36:
call cudaDeviceSynchronize@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L38:
.cfi_restore_state
call _Z22__device_stub__Z4dumpvv
jmp .L36
.cfi_endproc
.LFE2435:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z4dumpv"
.LC1:
.string "_Z10test_finalv"
.LC2:
.string "_Z4testv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2467:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z4dumpv(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z10test_finalv(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z4testv(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2467:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <stdio.h>
__device__ float reference(float x)
{
double y = x;
return y * tanh(log1p(exp(y)));
}
__device__ float mish_final(float value)
{
auto e = __expf(value);
auto n = e * e + 2 * e;
if (value <= -0.6f)
return value * __fdividef(n, n + 2);
return value - 2 * __fdividef(value, n + 2);
}
__device__ half mish_half_old(half value)
{
return value * half(tanhf(hlog(half(1) + hexp(value))));
}
__device__ half mish_half_final(half value)
{
if (value > half(3.999))
return value;
auto e = hexp(value);
auto n = e * e + half(2) * e;
return value * n / (n + half(2));
}
__global__ void test()
{
for (float x = 0; x < 6; x += 0.0001)
{
// double precision reference
float ref = reference(x);
half h = x;
float expr1 = [=] {
return h * half(tanhf(hlog(half(1.0f) + hexp(h))));
} ();
auto e = hexp(h);
auto n = e * e + half(2) * e;
float expr2 = h * n / (n + half(2));
float expr3 = x; // h - half(2) * h / (n + half(2));
double err1 = abs(double(ref) - double(expr1));
double err2 = abs(double(ref) - double(expr2));
double err3 = abs(double(ref) - double(expr3));
int temp;
printf("[x=%f] %.7e %.7e %.7e %.7e (%.7e, %.7e, %.7e, %.7e)\n",
x, ref, expr1, expr2, expr3,
//frexpf(ref, &temp), frexpf(expr1, &temp), frexpf(expr2, &temp), frexpf(expr3, &temp),
0.0f, float(err1), float(err2), float(err3));
}
}
__global__ void test_final()
{
for (float x = -100; x < 100; x += 0.1)
{
float ref = reference(x);
float expr = mish_half_final(x);
printf("[x=%f] %.7e %.7e (err=%.8e)\n", x, ref, expr, abs(expr - ref));
}
}
__global__ void dump()
{
for (float x = -20; x < 50; x += 0.0001)
printf("%.7f %.7e\n", x, static_cast<float>(mish_half_final(x)));
}
int main ()
{
dump<<<1, 1>>>();
cudaDeviceSynchronize();
return 0;
} | #include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <stdio.h>
__device__ float reference(float x)
{
double y = x;
return y * tanh(log1p(exp(y)));
}
__device__ float mish_final(float value)
{
auto e = __expf(value);
auto n = e * e + 2 * e;
if (value <= -0.6f)
return value * __fdividef(n, n + 2);
return value - 2 * __fdividef(value, n + 2);
}
__device__ half mish_half_old(half value)
{
return value * half(tanhf(hlog(half(1) + hexp(value))));
}
__device__ half mish_half_final(half value)
{
if (value > half(3.999))
return value;
auto e = hexp(value);
auto n = e * e + half(2) * e;
return value * n / (n + half(2));
}
__global__ void test()
{
for (float x = 0; x < 6; x += 0.0001)
{
// double precision reference
float ref = reference(x);
half h = x;
float expr1 = [=] {
return h * half(tanhf(hlog(half(1.0f) + hexp(h))));
} ();
auto e = hexp(h);
auto n = e * e + half(2) * e;
float expr2 = h * n / (n + half(2));
float expr3 = x; // h - half(2) * h / (n + half(2));
double err1 = abs(double(ref) - double(expr1));
double err2 = abs(double(ref) - double(expr2));
double err3 = abs(double(ref) - double(expr3));
int temp;
printf("[x=%f] %.7e %.7e %.7e %.7e (%.7e, %.7e, %.7e, %.7e)\n",
x, ref, expr1, expr2, expr3,
//frexpf(ref, &temp), frexpf(expr1, &temp), frexpf(expr2, &temp), frexpf(expr3, &temp),
0.0f, float(err1), float(err2), float(err3));
}
}
__global__ void test_final()
{
for (float x = -100; x < 100; x += 0.1)
{
float ref = reference(x);
float expr = mish_half_final(x);
printf("[x=%f] %.7e %.7e (err=%.8e)\n", x, ref, expr, abs(expr - ref));
}
}
__global__ void dump()
{
for (float x = -20; x < 50; x += 0.0001)
printf("%.7f %.7e\n", x, static_cast<float>(mish_half_final(x)));
}
int main ()
{
dump<<<1, 1>>>();
hipDeviceSynchronize();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <stdio.h>
__device__ float reference(float x)
{
double y = x;
return y * tanh(log1p(exp(y)));
}
__device__ float mish_final(float value)
{
auto e = __expf(value);
auto n = e * e + 2 * e;
if (value <= -0.6f)
return value * __fdividef(n, n + 2);
return value - 2 * __fdividef(value, n + 2);
}
__device__ half mish_half_old(half value)
{
return value * half(tanhf(hlog(half(1) + hexp(value))));
}
__device__ half mish_half_final(half value)
{
if (value > half(3.999))
return value;
auto e = hexp(value);
auto n = e * e + half(2) * e;
return value * n / (n + half(2));
}
__global__ void test()
{
for (float x = 0; x < 6; x += 0.0001)
{
// double precision reference
float ref = reference(x);
half h = x;
float expr1 = [=] {
return h * half(tanhf(hlog(half(1.0f) + hexp(h))));
} ();
auto e = hexp(h);
auto n = e * e + half(2) * e;
float expr2 = h * n / (n + half(2));
float expr3 = x; // h - half(2) * h / (n + half(2));
double err1 = abs(double(ref) - double(expr1));
double err2 = abs(double(ref) - double(expr2));
double err3 = abs(double(ref) - double(expr3));
int temp;
printf("[x=%f] %.7e %.7e %.7e %.7e (%.7e, %.7e, %.7e, %.7e)\n",
x, ref, expr1, expr2, expr3,
//frexpf(ref, &temp), frexpf(expr1, &temp), frexpf(expr2, &temp), frexpf(expr3, &temp),
0.0f, float(err1), float(err2), float(err3));
}
}
__global__ void test_final()
{
for (float x = -100; x < 100; x += 0.1)
{
float ref = reference(x);
float expr = mish_half_final(x);
printf("[x=%f] %.7e %.7e (err=%.8e)\n", x, ref, expr, abs(expr - ref));
}
}
__global__ void dump()
{
for (float x = -20; x < 50; x += 0.0001)
printf("%.7f %.7e\n", x, static_cast<float>(mish_half_final(x)));
}
int main ()
{
dump<<<1, 1>>>();
hipDeviceSynchronize();
return 0;
} | .text
.file "mish_design_half.hip"
.globl _Z19__device_stub__testv # -- Begin function _Z19__device_stub__testv
.p2align 4, 0x90
.type _Z19__device_stub__testv,@function
_Z19__device_stub__testv: # @_Z19__device_stub__testv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z4testv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z19__device_stub__testv, .Lfunc_end0-_Z19__device_stub__testv
.cfi_endproc
# -- End function
.globl _Z25__device_stub__test_finalv # -- Begin function _Z25__device_stub__test_finalv
.p2align 4, 0x90
.type _Z25__device_stub__test_finalv,@function
_Z25__device_stub__test_finalv: # @_Z25__device_stub__test_finalv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z10test_finalv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end1:
.size _Z25__device_stub__test_finalv, .Lfunc_end1-_Z25__device_stub__test_finalv
.cfi_endproc
# -- End function
.globl _Z19__device_stub__dumpv # -- Begin function _Z19__device_stub__dumpv
.p2align 4, 0x90
.type _Z19__device_stub__dumpv,@function
_Z19__device_stub__dumpv: # @_Z19__device_stub__dumpv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z4dumpv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end2:
.size _Z19__device_stub__dumpv, .Lfunc_end2-_Z19__device_stub__dumpv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z4dumpv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_2:
callq hipDeviceSynchronize
xorl %eax, %eax
addq $56, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4testv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10test_finalv, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4dumpv, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4testv,@object # @_Z4testv
.section .rodata,"a",@progbits
.globl _Z4testv
.p2align 3, 0x0
_Z4testv:
.quad _Z19__device_stub__testv
.size _Z4testv, 8
.type _Z10test_finalv,@object # @_Z10test_finalv
.globl _Z10test_finalv
.p2align 3, 0x0
_Z10test_finalv:
.quad _Z25__device_stub__test_finalv
.size _Z10test_finalv, 8
.type _Z4dumpv,@object # @_Z4dumpv
.globl _Z4dumpv
.p2align 3, 0x0
_Z4dumpv:
.quad _Z19__device_stub__dumpv
.size _Z4dumpv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z4testv"
.size .L__unnamed_1, 9
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z10test_finalv"
.size .L__unnamed_2, 16
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z4dumpv"
.size .L__unnamed_3, 9
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__testv
.addrsig_sym _Z25__device_stub__test_finalv
.addrsig_sym _Z19__device_stub__dumpv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4testv
.addrsig_sym _Z10test_finalv
.addrsig_sym _Z4dumpv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000c75d6_00000000-6_mish_design_half.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2438:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2438:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9referencef
.type _Z9referencef, @function
_Z9referencef:
.LFB2431:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2431:
.size _Z9referencef, .-_Z9referencef
.globl _Z10mish_finalf
.type _Z10mish_finalf, @function
_Z10mish_finalf:
.LFB2432:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2432:
.size _Z10mish_finalf, .-_Z10mish_finalf
.globl _Z13mish_half_old6__half
.type _Z13mish_half_old6__half, @function
_Z13mish_half_old6__half:
.LFB2433:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2433:
.size _Z13mish_half_old6__half, .-_Z13mish_half_old6__half
.globl _Z15mish_half_final6__half
.type _Z15mish_half_final6__half, @function
_Z15mish_half_final6__half:
.LFB2434:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2434:
.size _Z15mish_half_final6__half, .-_Z15mish_half_final6__half
.globl _Z22__device_stub__Z4testvv
.type _Z22__device_stub__Z4testvv, @function
_Z22__device_stub__Z4testvv:
.LFB2460:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z4testv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2460:
.size _Z22__device_stub__Z4testvv, .-_Z22__device_stub__Z4testvv
.globl _Z4testv
.type _Z4testv, @function
_Z4testv:
.LFB2461:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z22__device_stub__Z4testvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2461:
.size _Z4testv, .-_Z4testv
.globl _Z29__device_stub__Z10test_finalvv
.type _Z29__device_stub__Z10test_finalvv, @function
_Z29__device_stub__Z10test_finalvv:
.LFB2462:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z10test_finalv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2462:
.size _Z29__device_stub__Z10test_finalvv, .-_Z29__device_stub__Z10test_finalvv
.globl _Z10test_finalv
.type _Z10test_finalv, @function
_Z10test_finalv:
.LFB2463:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z10test_finalvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2463:
.size _Z10test_finalv, .-_Z10test_finalv
.globl _Z22__device_stub__Z4dumpvv
.type _Z22__device_stub__Z4dumpvv, @function
_Z22__device_stub__Z4dumpvv:
.LFB2464:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z4dumpv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2464:
.size _Z22__device_stub__Z4dumpvv, .-_Z22__device_stub__Z4dumpvv
.globl _Z4dumpv
.type _Z4dumpv, @function
_Z4dumpv:
.LFB2465:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z22__device_stub__Z4dumpvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2465:
.size _Z4dumpv, .-_Z4dumpv
.globl main
.type main, @function
main:
.LFB2435:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L38
.L36:
call cudaDeviceSynchronize@PLT
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L38:
.cfi_restore_state
call _Z22__device_stub__Z4dumpvv
jmp .L36
.cfi_endproc
.LFE2435:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z4dumpv"
.LC1:
.string "_Z10test_finalv"
.LC2:
.string "_Z4testv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2467:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z4dumpv(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z10test_finalv(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z4testv(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2467:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "mish_design_half.hip"
.globl _Z19__device_stub__testv # -- Begin function _Z19__device_stub__testv
.p2align 4, 0x90
.type _Z19__device_stub__testv,@function
_Z19__device_stub__testv: # @_Z19__device_stub__testv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z4testv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z19__device_stub__testv, .Lfunc_end0-_Z19__device_stub__testv
.cfi_endproc
# -- End function
.globl _Z25__device_stub__test_finalv # -- Begin function _Z25__device_stub__test_finalv
.p2align 4, 0x90
.type _Z25__device_stub__test_finalv,@function
_Z25__device_stub__test_finalv: # @_Z25__device_stub__test_finalv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z10test_finalv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end1:
.size _Z25__device_stub__test_finalv, .Lfunc_end1-_Z25__device_stub__test_finalv
.cfi_endproc
# -- End function
.globl _Z19__device_stub__dumpv # -- Begin function _Z19__device_stub__dumpv
.p2align 4, 0x90
.type _Z19__device_stub__dumpv,@function
_Z19__device_stub__dumpv: # @_Z19__device_stub__dumpv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z4dumpv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end2:
.size _Z19__device_stub__dumpv, .Lfunc_end2-_Z19__device_stub__dumpv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z4dumpv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_2:
callq hipDeviceSynchronize
xorl %eax, %eax
addq $56, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4testv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10test_finalv, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4dumpv, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4testv,@object # @_Z4testv
.section .rodata,"a",@progbits
.globl _Z4testv
.p2align 3, 0x0
_Z4testv:
.quad _Z19__device_stub__testv
.size _Z4testv, 8
.type _Z10test_finalv,@object # @_Z10test_finalv
.globl _Z10test_finalv
.p2align 3, 0x0
_Z10test_finalv:
.quad _Z25__device_stub__test_finalv
.size _Z10test_finalv, 8
.type _Z4dumpv,@object # @_Z4dumpv
.globl _Z4dumpv
.p2align 3, 0x0
_Z4dumpv:
.quad _Z19__device_stub__dumpv
.size _Z4dumpv, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z4testv"
.size .L__unnamed_1, 9
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z10test_finalv"
.size .L__unnamed_2, 16
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z4dumpv"
.size .L__unnamed_3, 9
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__testv
.addrsig_sym _Z25__device_stub__test_finalv
.addrsig_sym _Z19__device_stub__dumpv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4testv
.addrsig_sym _Z10test_finalv
.addrsig_sym _Z4dumpv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
* Copyright 2015 Netherlands eScience Center, VU University Amsterdam, and Netherlands Forensic Institute
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file contains CUDA kernels for applying a Wiener filter to a
* PRNU pattern, as proposed by:
* M. Chen et al. "Determining image origin and integrity using sensor
* noise", IEEE Trans. Inf. Forensics Secur. 3 (2008) 74-90.
*
* The Wiener filter is used to remove JPEG artifacts from a PRNU pattern.
*
* To apply the complete filter:
* apply Fourier transform to the input image
* call computeSquaredMagnitudes() on the frequencies
* call computeVarianceEstimates() on the squared magnitudes
* call computeVarianceZeroMean() on the squared magnitudes
* call scaleWithVariances() scaling the frequencies using the local and global variance
* apply inverse Fourier transform
* normalize result by calling normalizeToReal()
*
* @author Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl>
* @version 0.1
*/
#ifndef block_size_x
#define block_size_x 32
#endif
#ifndef block_size_y
#define block_size_y 16
#endif
#ifndef reuse_computation
#define reuse_computation 1
#endif
//set the number and size of filters, also adjust max_border
#define FILTERS 4
#define FILTER_SIZES {3, 5, 7, 9}
#define MAX_BORDER 4 //the largest (filter size/2)
#define FLT_MAX 3.40282347e+38f
//function interfaces to prevent C++ garbling the kernel keys
extern "C" {
__global__ void computeSquaredMagnitudes(int h, int w, float* output, float* frequencies);
__global__ void scaleWithVariances(int h, int w, float* output, float* input, float* varianceEstimates, float* variance);
__global__ void toComplex(int h, int w, float* complex, float* input);
__global__ void toReal(int h, int w, float* output, float* complex);
__global__ void computeVarianceZeroMean(int n, float* output, float *input);
__global__ void computeVarianceEstimates(int h, int w, float* varest, float* input);
__global__ void computeVarianceEstimates_naive(int h, int w, float* varest, float* input);
__global__ void normalizeToReal(int h, int w, float* output, float* complex);
__global__ void normalize(int h, int w, float* output, float* complex);
__global__ void sumFloats(float *output, float *input, int n);
}
/**
* Computes the square of each frequency and stores the result as a real.
*/
__global__ void computeSquaredMagnitudes(int h, int w, float* output, float* frequencies) {
int i = threadIdx.y + blockIdx.y * block_size_y;
int j = threadIdx.x + blockIdx.x * block_size_x;
if (j < w && i < h) {
float re = frequencies[i*2*w+(2 * j)];
float im = frequencies[i*2*w+(2 * j + 1)];
output[i*w+j] = (re * re) + (im * im);
}
}
/**
* This kernel scales the frequencies in input with a combination of the global variance and an estimate
* for the local variance at that position. Effectively this cleans the input pattern from low frequency
* noise.
*/
__global__ void scaleWithVariances(int h, int w, float* output, float* input, float* varianceEstimates, float* variance) {
int i = threadIdx.y + blockIdx.y * block_size_y;
int j = threadIdx.x + blockIdx.x * block_size_x;
float var = variance[0];
if (j < w && i < h) {
float scale = var / max(var, varianceEstimates[i*w+j]);
output[i*2*w+(j * 2)] = input[i*2*w+(j*2)] * scale;
output[i*2*w+(j * 2 + 1)] = input[i*2*w+(j * 2 + 1)] * scale;
}
}
/**
* Simple helper kernel to convert an array of real values to an array of complex values
*/
__global__ void toComplex(int h, int w, float* complex, float* input) {
int i = threadIdx.y + blockIdx.y * block_size_y;
int j = threadIdx.x + blockIdx.x * block_size_x;
if (i < h && j < w) {
complex[i * w * 2 + 2 * j] = input[i * w + j];
complex[i * w * 2 + (2 * j + 1)] = 0.0f;
}
}
/**
* Simple helper kernel to convert a complex array to an array of real values
*/
__global__ void toReal(int h, int w, float* output, float* complex) {
int i = threadIdx.y + blockIdx.y * block_size_y;
int j = threadIdx.x + blockIdx.x * block_size_x;
if (i < h && j < w) {
output[i*w+j] = complex[i * w * 2 + 2 * j];
}
}
/**
* This kernel normalizes the input by dividing it by the number of pixels in the image.
* It takes an array of complex numbers as input, but only stores the real values.
*/
__global__ void normalizeToReal(int h, int w, float* output, float* complex) {
int i = threadIdx.y + blockIdx.y * block_size_y;
int j = threadIdx.x + blockIdx.x * block_size_x;
if (i < h && j < w) {
output[i*w+j] = (complex[i * w * 2 + 2 * j] / (float)(w * h));
}
}
/**
* This kernel normalizes the complex input by dividing it by the number of pixels in the image.
*/
__global__ void normalize(int h, int w, float* complex_out, float* complex_in) {
int i = threadIdx.y + blockIdx.y * block_size_y;
int j = threadIdx.x + blockIdx.x * block_size_x;
if (i < h && j < w) {
complex_out[i*w*2+2*j ] = (complex_in[i*w*2+2*j ] / (float)(w*h));
complex_out[i*w*2+2*j+1] = (complex_in[i*w*2+2*j+1] / (float)(w*h));
}
}
/**
* computeVarianceEstimates uses a number of simple filters to compute a minimum local variance
*
* Instead of using multiple arrays with zeroed borders around them, the loading phase of this
* kernel writes a zero to shared memory instead of loading a border value from global memory.
* The filters can then be performed as normal on the data in shared memory. Because of this
* MAX_BORDER needs to be set accordingly.
*
*/
__global__ void computeVarianceEstimates(int h, int w, float* varest, float* input) {
int ty = threadIdx.y;
int tx = threadIdx.x;
int i = blockIdx.y * block_size_y;
int j = blockIdx.x * block_size_x;
__shared__ float shinput[block_size_y+2*MAX_BORDER][block_size_x+2*MAX_BORDER];
//the loading phase of the kernel, which writes 0.0f to shared memory if the index
//is outside the input
int yEnd = block_size_y+2*MAX_BORDER;
int xEnd = block_size_x+2*MAX_BORDER;
for (int y=ty; y < yEnd; y+= block_size_y) {
for (int x=tx; x < xEnd; x+= block_size_x) {
float in = 0.0f;
int indexy = i+y-MAX_BORDER;
int indexx = j+x-MAX_BORDER;
if (indexy >= 0 && indexy < h) {
if (indexx >= 0 && indexx < w) {
in = input[indexy*w+indexx];
}
}
shinput[y][x] = in;
}
}
__syncthreads();
const int filter[FILTERS] = FILTER_SIZES;
float res = FLT_MAX;
#if reuse_computation == 0
//perform filtering without reusing the sum from smaller filters
for (int f = 0; f < FILTERS; f++) {
int filterSize = filter[f];
int offset = MAX_BORDER-(filterSize/2);
//do a convolution
float sum = 0.0f;
for (int fi = 0; fi < filterSize; fi++) {
for (int fj = 0; fj < filterSize; fj++) {
sum += shinput[ty+fi+offset][tx+fj+offset];
}
}
sum /= (float)(filterSize * filterSize);
//store minimum
res = sum < res ? sum : res;
}
#elif reuse_computation == 1
//perform filtering while reusing the sum from smaller filters
//start from center pixel
float sum = shinput[ty+MAX_BORDER][tx+MAX_BORDER];
//add sides of the square filter to sum and store minimum average
for (int f = 0; f < FILTERS; f++) {
int filterSize = filter[f];
int offset = MAX_BORDER-(filterSize/2);
//top and bottom row
for (int fj=0; fj<filterSize; fj++) {
sum += shinput[ty+0+offset][tx+fj+offset];
sum += shinput[ty+filterSize-1+offset][tx+fj+offset];
}
//two sides (between top and bottom rows)
for (int fi=1; fi<filterSize-1; fi++) {
sum += shinput[ty+fi+offset][tx+0+offset];
sum += shinput[ty+fi+offset][tx+filterSize-1+offset];
}
//store minimum
float avg = sum / (filterSize*filterSize);
res = avg < res ? avg : res;
}
#endif
//write output
if (i + ty < h) {
if (j + tx < w) {
varest[(i+ty)*w + (j+tx)] = res;
}
}
}
/**
* This method is a naive implementation of computeVarianceEstimates used for correctness checks
*/
__global__ void computeVarianceEstimates_naive(int h, int w, float* varest, float* input) {
int i = threadIdx.y + blockIdx.y * block_size_y;
int j = threadIdx.x + blockIdx.x * block_size_x;
float res = FLT_MAX;
if (i < h && j < w) {
const int filter[FILTERS] = FILTER_SIZES;
for (int f = 0; f < FILTERS; f++) {
int filterSize = filter[f];
int border = filterSize/2;
//do a convolution
float sum = 0.0f;
for (int fi = 0; fi < filterSize; fi++) {
for (int fj = 0; fj < filterSize; fj++) {
//original
//sum += input[(i + fi)*(w+border*2)+(j + fj)];
int row = i+fi-border;
int col = j+fj-border;
//the following ifs are a hack to save redundant copying
if (row >= 0 && row < h) {
if (col >= 0 && col < w) {
sum += input[row*w + col];
}
}
}
}
sum /= (float)(filterSize * filterSize);
if (sum < res) {
res = sum;
}
}
//write output
varest[i*w+j] = res;
}
}
/*
* This method computes the variance of an input array, assuming the mean is equal to zero
*
* Thread block size should be power of two because of the reduction.
* The implementation currently assumes only one thread block is used for the entire input array
*
* In case of multiple thread blocks initialize output to zero and use atomic add or another kernel
*
* block_size_x power of 2
*/
#ifndef grid_size_x //hack to see if the Kernel Tuner is being used
#undef block_size_x
#define block_size_x 128
#endif
__global__ void computeVarianceZeroMean(int n, float *output, float *input) {
int x = blockIdx.x * block_size_x + threadIdx.x;
int ti = threadIdx.x;
int step_size = block_size_x * gridDim.x;
float sum = 0.0f;
if (x < n) {
//compute thread-local sums of squares
for (int i=x; i < n; i+=step_size) {
sum += input[i]*input[i];
}
}
//store local sums in shared memory
__shared__ float shmem[block_size_x];
shmem[ti] = sum;
__syncthreads();
//reduce local sums
for (unsigned int s=block_size_x/2; s>0; s>>=1) {
if (ti < s) {
shmem[ti] += shmem[ti + s];
}
__syncthreads();
}
//write result
if (ti == 0) {
output[blockIdx.x] = ( shmem[0] * n ) / ( n - 1 ); //in case of multiple threadblocks write back using atomicAdd
}
}
/*
* Simple CUDA Helper function to reduce the output of a
* reduction kernel with multiple thread blocks to a single value
*
* This function performs a sum of an array of floats
*
* This function is to be called with only a single thread block
*/
__global__ void sumFloats(float *output, float *input, int n) {
int ti = threadIdx.x;
__shared__ float shmem[block_size_x];
//compute thread-local sums
float sum = 0.0f;
for (int i=ti; i < n; i+=block_size_x) {
sum += input[i];
}
//store local sums in shared memory
shmem[ti] = sum;
__syncthreads();
//reduce local sums
for (unsigned int s=block_size_x/2; s>0; s>>=1) {
if (ti < s) {
shmem[ti] += shmem[ti + s];
}
__syncthreads();
}
//write result
if (ti == 0) {
output[0] = shmem[0];
}
} | .file "tmpxft_001bdbf1_00000000-6_wienerfilter.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z48__device_stub__Z24computeSquaredMagnitudesiiPfS_iiPfS_
.type _Z48__device_stub__Z24computeSquaredMagnitudesiiPfS_iiPfS_, @function
_Z48__device_stub__Z24computeSquaredMagnitudesiiPfS_iiPfS_:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq computeSquaredMagnitudes(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z48__device_stub__Z24computeSquaredMagnitudesiiPfS_iiPfS_, .-_Z48__device_stub__Z24computeSquaredMagnitudesiiPfS_iiPfS_
.globl computeSquaredMagnitudes
.type computeSquaredMagnitudes, @function
computeSquaredMagnitudes:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z48__device_stub__Z24computeSquaredMagnitudesiiPfS_iiPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size computeSquaredMagnitudes, .-computeSquaredMagnitudes
.globl _Z46__device_stub__Z18scaleWithVariancesiiPfS_S_S_iiPfS_S_S_
.type _Z46__device_stub__Z18scaleWithVariancesiiPfS_S_S_iiPfS_S_S_, @function
_Z46__device_stub__Z18scaleWithVariancesiiPfS_S_S_iiPfS_S_S_:
.LFB2053:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 44(%rsp)
movl %esi, 40(%rsp)
movq %rdx, 32(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 16(%rsp)
movq %r9, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 120(%rsp)
leaq 32(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq scaleWithVariances(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z46__device_stub__Z18scaleWithVariancesiiPfS_S_S_iiPfS_S_S_, .-_Z46__device_stub__Z18scaleWithVariancesiiPfS_S_S_iiPfS_S_S_
.globl scaleWithVariances
.type scaleWithVariances, @function
scaleWithVariances:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z18scaleWithVariancesiiPfS_S_S_iiPfS_S_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size scaleWithVariances, .-scaleWithVariances
.globl _Z32__device_stub__Z9toComplexiiPfS_iiPfS_
.type _Z32__device_stub__Z9toComplexiiPfS_iiPfS_, @function
_Z32__device_stub__Z9toComplexiiPfS_iiPfS_:
.LFB2055:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq toComplex(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2055:
.size _Z32__device_stub__Z9toComplexiiPfS_iiPfS_, .-_Z32__device_stub__Z9toComplexiiPfS_iiPfS_
.globl toComplex
.type toComplex, @function
toComplex:
.LFB2056:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9toComplexiiPfS_iiPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size toComplex, .-toComplex
.globl _Z29__device_stub__Z6toRealiiPfS_iiPfS_
.type _Z29__device_stub__Z6toRealiiPfS_iiPfS_, @function
_Z29__device_stub__Z6toRealiiPfS_iiPfS_:
.LFB2057:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq toReal(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z29__device_stub__Z6toRealiiPfS_iiPfS_, .-_Z29__device_stub__Z6toRealiiPfS_iiPfS_
.globl toReal
.type toReal, @function
toReal:
.LFB2058:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z6toRealiiPfS_iiPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size toReal, .-toReal
.globl _Z39__device_stub__Z15normalizeToRealiiPfS_iiPfS_
.type _Z39__device_stub__Z15normalizeToRealiiPfS_iiPfS_, @function
_Z39__device_stub__Z15normalizeToRealiiPfS_iiPfS_:
.LFB2059:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L39
.L35:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq normalizeToReal(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L35
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size _Z39__device_stub__Z15normalizeToRealiiPfS_iiPfS_, .-_Z39__device_stub__Z15normalizeToRealiiPfS_iiPfS_
.globl normalizeToReal
.type normalizeToReal, @function
normalizeToReal:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z15normalizeToRealiiPfS_iiPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size normalizeToReal, .-normalizeToReal
.globl _Z32__device_stub__Z9normalizeiiPfS_iiPfS_
.type _Z32__device_stub__Z9normalizeiiPfS_iiPfS_, @function
_Z32__device_stub__Z9normalizeiiPfS_iiPfS_:
.LFB2061:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L47
.L43:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L48
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L47:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq normalize(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L43
.L48:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2061:
.size _Z32__device_stub__Z9normalizeiiPfS_iiPfS_, .-_Z32__device_stub__Z9normalizeiiPfS_iiPfS_
.globl normalize
.type normalize, @function
normalize:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9normalizeiiPfS_iiPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size normalize, .-normalize
.globl _Z48__device_stub__Z24computeVarianceEstimatesiiPfS_iiPfS_
.type _Z48__device_stub__Z24computeVarianceEstimatesiiPfS_iiPfS_, @function
_Z48__device_stub__Z24computeVarianceEstimatesiiPfS_iiPfS_:
.LFB2063:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L55
.L51:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L56
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L55:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq computeVarianceEstimates(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L51
.L56:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2063:
.size _Z48__device_stub__Z24computeVarianceEstimatesiiPfS_iiPfS_, .-_Z48__device_stub__Z24computeVarianceEstimatesiiPfS_iiPfS_
.globl computeVarianceEstimates
.type computeVarianceEstimates, @function
computeVarianceEstimates:
.LFB2064:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z48__device_stub__Z24computeVarianceEstimatesiiPfS_iiPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size computeVarianceEstimates, .-computeVarianceEstimates
.globl _Z54__device_stub__Z30computeVarianceEstimates_naiveiiPfS_iiPfS_
.type _Z54__device_stub__Z30computeVarianceEstimates_naiveiiPfS_iiPfS_, @function
_Z54__device_stub__Z30computeVarianceEstimates_naiveiiPfS_iiPfS_:
.LFB2065:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L63
.L59:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L64
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L63:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq computeVarianceEstimates_naive(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L59
.L64:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2065:
.size _Z54__device_stub__Z30computeVarianceEstimates_naiveiiPfS_iiPfS_, .-_Z54__device_stub__Z30computeVarianceEstimates_naiveiiPfS_iiPfS_
.globl computeVarianceEstimates_naive
.type computeVarianceEstimates_naive, @function
computeVarianceEstimates_naive:
.LFB2066:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z54__device_stub__Z30computeVarianceEstimates_naiveiiPfS_iiPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2066:
.size computeVarianceEstimates_naive, .-computeVarianceEstimates_naive
.globl _Z46__device_stub__Z23computeVarianceZeroMeaniPfS_iPfS_
.type _Z46__device_stub__Z23computeVarianceZeroMeaniPfS_iPfS_, @function
_Z46__device_stub__Z23computeVarianceZeroMeaniPfS_iPfS_:
.LFB2067:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L71
.L67:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L72
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L71:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq computeVarianceZeroMean(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L67
.L72:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2067:
.size _Z46__device_stub__Z23computeVarianceZeroMeaniPfS_iPfS_, .-_Z46__device_stub__Z23computeVarianceZeroMeaniPfS_iPfS_
.globl computeVarianceZeroMean
.type computeVarianceZeroMean, @function
computeVarianceZeroMean:
.LFB2068:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z23computeVarianceZeroMeaniPfS_iPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2068:
.size computeVarianceZeroMean, .-computeVarianceZeroMean
.globl _Z31__device_stub__Z9sumFloatsPfS_iPfS_i
.type _Z31__device_stub__Z9sumFloatsPfS_iPfS_i, @function
_Z31__device_stub__Z9sumFloatsPfS_iPfS_i:
.LFB2069:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L79
.L75:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L80
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L79:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq sumFloats(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L75
.L80:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2069:
.size _Z31__device_stub__Z9sumFloatsPfS_iPfS_i, .-_Z31__device_stub__Z9sumFloatsPfS_iPfS_i
.globl sumFloats
.type sumFloats, @function
sumFloats:
.LFB2070:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z9sumFloatsPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2070:
.size sumFloats, .-sumFloats
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "sumFloats"
.LC1:
.string "computeVarianceZeroMean"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "computeVarianceEstimates_naive"
.section .rodata.str1.1
.LC3:
.string "computeVarianceEstimates"
.LC4:
.string "normalize"
.LC5:
.string "normalizeToReal"
.LC6:
.string "toReal"
.LC7:
.string "toComplex"
.LC8:
.string "scaleWithVariances"
.LC9:
.string "computeSquaredMagnitudes"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2072:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq sumFloats(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq computeVarianceZeroMean(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq computeVarianceEstimates_naive(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq computeVarianceEstimates(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq normalize(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq normalizeToReal(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq toReal(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq toComplex(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq scaleWithVariances(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq computeSquaredMagnitudes(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2072:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
* Copyright 2015 Netherlands eScience Center, VU University Amsterdam, and Netherlands Forensic Institute
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file contains CUDA kernels for applying a Wiener filter to a
* PRNU pattern, as proposed by:
* M. Chen et al. "Determining image origin and integrity using sensor
* noise", IEEE Trans. Inf. Forensics Secur. 3 (2008) 74-90.
*
* The Wiener filter is used to remove JPEG artifacts from a PRNU pattern.
*
* To apply the complete filter:
* apply Fourier transform to the input image
* call computeSquaredMagnitudes() on the frequencies
* call computeVarianceEstimates() on the squared magnitudes
* call computeVarianceZeroMean() on the squared magnitudes
* call scaleWithVariances() scaling the frequencies using the local and global variance
* apply inverse Fourier transform
* normalize result by calling normalizeToReal()
*
* @author Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl>
* @version 0.1
*/
#ifndef block_size_x
#define block_size_x 32
#endif
#ifndef block_size_y
#define block_size_y 16
#endif
#ifndef reuse_computation
#define reuse_computation 1
#endif
//set the number and size of filters, also adjust max_border
#define FILTERS 4
#define FILTER_SIZES {3, 5, 7, 9}
#define MAX_BORDER 4 //the largest (filter size/2)
#define FLT_MAX 3.40282347e+38f
//function interfaces to prevent C++ garbling the kernel keys
extern "C" {
__global__ void computeSquaredMagnitudes(int h, int w, float* output, float* frequencies);
__global__ void scaleWithVariances(int h, int w, float* output, float* input, float* varianceEstimates, float* variance);
__global__ void toComplex(int h, int w, float* complex, float* input);
__global__ void toReal(int h, int w, float* output, float* complex);
__global__ void computeVarianceZeroMean(int n, float* output, float *input);
__global__ void computeVarianceEstimates(int h, int w, float* varest, float* input);
__global__ void computeVarianceEstimates_naive(int h, int w, float* varest, float* input);
__global__ void normalizeToReal(int h, int w, float* output, float* complex);
__global__ void normalize(int h, int w, float* output, float* complex);
__global__ void sumFloats(float *output, float *input, int n);
}
/**
* Computes the square of each frequency and stores the result as a real.
*/
__global__ void computeSquaredMagnitudes(int h, int w, float* output, float* frequencies) {
    //one thread per output pixel; launch with (block_size_x, block_size_y) blocks
    int row = blockIdx.y * block_size_y + threadIdx.y;
    int col = blockIdx.x * block_size_x + threadIdx.x;
    if (row < h && col < w) {
        //frequencies is interleaved complex: row stride 2*w, (re, im) pairs
        float real = frequencies[row * 2 * w + 2 * col];
        float imag = frequencies[row * 2 * w + 2 * col + 1];
        //|z|^2 = re^2 + im^2, stored as a plain real value
        output[row * w + col] = real * real + imag * imag;
    }
}
/**
* This kernel scales the frequencies in input with a combination of the global variance and an estimate
* for the local variance at that position. Effectively this cleans the input pattern from low frequency
* noise.
*/
__global__ void scaleWithVariances(int h, int w, float* output, float* input, float* varianceEstimates, float* variance) {
    int row = blockIdx.y * block_size_y + threadIdx.y;
    int col = blockIdx.x * block_size_x + threadIdx.x;
    //variance[0] holds the global variance computed by the reduction kernels
    float globalVar = variance[0];
    if (row < h && col < w) {
        //attenuation <= 1: frequencies whose local variance estimate exceeds
        //the global variance are scaled down, suppressing low-frequency noise
        float attenuation = globalVar / max(globalVar, varianceEstimates[row * w + col]);
        int base = row * 2 * w + 2 * col;
        output[base]     = input[base]     * attenuation;  //real part
        output[base + 1] = input[base + 1] * attenuation;  //imaginary part
    }
}
/**
* Simple helper kernel to convert an array of real values to an array of complex values
*/
__global__ void toComplex(int h, int w, float* complex, float* input) {
    //expand an h x w real array into interleaved complex (re, im) pairs
    int row = blockIdx.y * block_size_y + threadIdx.y;
    int col = blockIdx.x * block_size_x + threadIdx.x;
    if (row < h && col < w) {
        int base = row * w * 2 + 2 * col;
        complex[base]     = input[row * w + col];  //real part
        complex[base + 1] = 0.0f;                  //imaginary part is zero
    }
}
/**
* Simple helper kernel to convert a complex array to an array of real values
*/
__global__ void toReal(int h, int w, float* output, float* complex) {
    //collapse interleaved complex input back to its real parts only
    int row = blockIdx.y * block_size_y + threadIdx.y;
    int col = blockIdx.x * block_size_x + threadIdx.x;
    if (row < h && col < w) {
        //even-indexed floats hold the real components; imaginary parts are dropped
        output[row * w + col] = complex[row * w * 2 + 2 * col];
    }
}
/**
* This kernel normalizes the input by dividing it by the number of pixels in the image.
* It takes an array of complex numbers as input, but only stores the real values.
*/
__global__ void normalizeToReal(int h, int w, float* output, float* complex) {
    //take the real part of each complex value and divide by the pixel count,
    //undoing the scaling introduced by the unnormalized inverse FFT
    int row = blockIdx.y * block_size_y + threadIdx.y;
    int col = blockIdx.x * block_size_x + threadIdx.x;
    if (row < h && col < w) {
        output[row * w + col] = complex[row * w * 2 + 2 * col] / (float)(w * h);
    }
}
/**
* This kernel normalizes the complex input by dividing it by the number of pixels in the image.
*/
__global__ void normalize(int h, int w, float* complex_out, float* complex_in) {
    //divide both components of every complex value by the number of pixels
    int row = blockIdx.y * block_size_y + threadIdx.y;
    int col = blockIdx.x * block_size_x + threadIdx.x;
    if (row < h && col < w) {
        int base = row * w * 2 + 2 * col;
        complex_out[base]     = complex_in[base]     / (float)(w * h);
        complex_out[base + 1] = complex_in[base + 1] / (float)(w * h);
    }
}
/**
* computeVarianceEstimates uses a number of simple filters to compute a minimum local variance
*
* Instead of using multiple arrays with zeroed borders around them, the loading phase of this
* kernel writes a zero to shared memory instead of loading a border value from global memory.
* The filters can then be performed as normal on the data in shared memory. Because of this
* MAX_BORDER needs to be set accordingly.
*
*/
//Computes, per pixel, the minimum average of the input over a set of square
//box filters (sizes FILTER_SIZES); since the input holds squared magnitudes
//(zero mean assumed), each average is a local variance estimate.
//Launch with (block_size_x, block_size_y) thread blocks, one per output tile.
//Shared memory: (block_size_y+2*MAX_BORDER) x (block_size_x+2*MAX_BORDER) floats.
__global__ void computeVarianceEstimates(int h, int w, float* varest, float* input) {
int ty = threadIdx.y;
int tx = threadIdx.x;
//(i, j) is the top-left corner of this block's output tile in the image
int i = blockIdx.y * block_size_y;
int j = blockIdx.x * block_size_x;
__shared__ float shinput[block_size_y+2*MAX_BORDER][block_size_x+2*MAX_BORDER];
//the loading phase of the kernel, which writes 0.0f to shared memory if the index
//is outside the input — an implicit zero border instead of padded global arrays
int yEnd = block_size_y+2*MAX_BORDER;
int xEnd = block_size_x+2*MAX_BORDER;
for (int y=ty; y < yEnd; y+= block_size_y) {
for (int x=tx; x < xEnd; x+= block_size_x) {
float in = 0.0f;
int indexy = i+y-MAX_BORDER;
int indexx = j+x-MAX_BORDER;
if (indexy >= 0 && indexy < h) {
if (indexx >= 0 && indexx < w) {
in = input[indexy*w+indexx];
}
}
shinput[y][x] = in;
}
}
//all threads must finish filling the tile before any thread reads it
__syncthreads();
const int filter[FILTERS] = FILTER_SIZES;
float res = FLT_MAX;
#if reuse_computation == 0
//perform filtering without reusing the sum from smaller filters
for (int f = 0; f < FILTERS; f++) {
int filterSize = filter[f];
//offset shifts the filter window so it is centered on this thread's pixel
int offset = MAX_BORDER-(filterSize/2);
//do a convolution
float sum = 0.0f;
for (int fi = 0; fi < filterSize; fi++) {
for (int fj = 0; fj < filterSize; fj++) {
sum += shinput[ty+fi+offset][tx+fj+offset];
}
}
sum /= (float)(filterSize * filterSize);
//store minimum
res = sum < res ? sum : res;
}
#elif reuse_computation == 1
//perform filtering while reusing the sum from smaller filters: each filter
//adds only its outer ring to the running sum (relies on FILTER_SIZES being
//consecutive odd sizes increasing by 2, so every previous square is nested)
//start from center pixel
float sum = shinput[ty+MAX_BORDER][tx+MAX_BORDER];
//add sides of the square filter to sum and store minimum average
for (int f = 0; f < FILTERS; f++) {
int filterSize = filter[f];
int offset = MAX_BORDER-(filterSize/2);
//top and bottom row
for (int fj=0; fj<filterSize; fj++) {
sum += shinput[ty+0+offset][tx+fj+offset];
sum += shinput[ty+filterSize-1+offset][tx+fj+offset];
}
//two sides (between top and bottom rows)
for (int fi=1; fi<filterSize-1; fi++) {
sum += shinput[ty+fi+offset][tx+0+offset];
sum += shinput[ty+fi+offset][tx+filterSize-1+offset];
}
//store minimum
float avg = sum / (filterSize*filterSize);
res = avg < res ? avg : res;
}
#endif
//write output (guard against partial tiles at the right/bottom image edges)
if (i + ty < h) {
if (j + tx < w) {
varest[(i+ty)*w + (j+tx)] = res;
}
}
}
/**
* This method is a naive implementation of computeVarianceEstimates used for correctness checks
*/
__global__ void computeVarianceEstimates_naive(int h, int w, float* varest, float* input) {
    //Reference implementation used for correctness checks: for each pixel,
    //directly average every box filter over the input (treating pixels outside
    //the image as zero) and keep the minimum average.
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    float best = FLT_MAX;
    if (row < h && col < w) {
        const int filter[FILTERS] = FILTER_SIZES;
        for (int f = 0; f < FILTERS; f++) {
            int fs = filter[f];
            int border = fs / 2;
            //direct box-filter convolution centered on (row, col)
            float total = 0.0f;
            for (int fi = 0; fi < fs; fi++) {
                for (int fj = 0; fj < fs; fj++) {
                    int r = row + fi - border;
                    int c = col + fj - border;
                    //out-of-image taps contribute zero (saves copying a
                    //zero-padded version of the input)
                    if (r >= 0 && r < h && c >= 0 && c < w) {
                        total += input[r * w + c];
                    }
                }
            }
            total /= (float)(fs * fs);
            if (total < best) {
                best = total;
            }
        }
        //write output
        varest[row * w + col] = best;
    }
}
/*
* This method computes the variance of an input array, assuming the mean is equal to zero
*
* Thread block size should be power of two because of the reduction.
* The implementation currently assumes only one thread block is used for the entire input array
*
* In case of multiple thread blocks initialize output to zero and use atomic add or another kernel
*
* block_size_x power of 2
*/
#ifndef grid_size_x //hack to see if the Kernel Tuner is being used
#undef block_size_x
#define block_size_x 128
#endif
//Computes a scaled variance of input assuming zero mean: each thread block
//writes ( sum(input[i]^2) * n ) / (n - 1) for its slice to output[blockIdx.x].
//block_size_x must be a power of two (required by the tree reduction); with
//more than one thread block the per-block results must be combined afterwards
//(e.g. with sumFloats).
//NOTE(review): divides by (n - 1), so n == 1 divides by zero — presumably
//callers always pass n > 1; confirm at the call site.
__global__ void computeVarianceZeroMean(int n, float *output, float *input) {
int x = blockIdx.x * block_size_x + threadIdx.x;
int ti = threadIdx.x;
int step_size = block_size_x * gridDim.x;
float sum = 0.0f;
if (x < n) {
//compute thread-local sums of squares (grid-stride loop over the input)
for (int i=x; i < n; i+=step_size) {
sum += input[i]*input[i];
}
}
//store local sums in shared memory
__shared__ float shmem[block_size_x];
shmem[ti] = sum;
__syncthreads();
//reduce local sums with a power-of-two tree reduction
for (unsigned int s=block_size_x/2; s>0; s>>=1) {
if (ti < s) {
shmem[ti] += shmem[ti + s];
}
__syncthreads();
}
//write result
if (ti == 0) {
output[blockIdx.x] = ( shmem[0] * n ) / ( n - 1 ); //in case of multiple threadblocks write back using atomicAdd
}
}
/*
* Simple CUDA Helper function to reduce the output of a
* reduction kernel with multiple thread blocks to a single value
*
* This function performs a sum of an array of floats
*
* This function is to be called with only a single thread block
*/
//Sums n floats from input into output[0].
//Must be launched with exactly one thread block of block_size_x threads
//(block_size_x a power of two, required by the tree reduction).
__global__ void sumFloats(float *output, float *input, int n) {
int ti = threadIdx.x;
__shared__ float shmem[block_size_x];
//compute thread-local sums (each thread strides over the input)
float sum = 0.0f;
for (int i=ti; i < n; i+=block_size_x) {
sum += input[i];
}
//store local sums in shared memory
shmem[ti] = sum;
__syncthreads();
//reduce local sums with a power-of-two tree reduction
for (unsigned int s=block_size_x/2; s>0; s>>=1) {
if (ti < s) {
shmem[ti] += shmem[ti + s];
}
__syncthreads();
}
//write result
if (ti == 0) {
output[0] = shmem[0];
}
}
/*
* Copyright 2015 Netherlands eScience Center, VU University Amsterdam, and Netherlands Forensic Institute
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file contains CUDA kernels for applying a Wiener filter to a
* PRNU pattern, as proposed by:
* M. Chen et al. "Determining image origin and integrity using sensor
* noise", IEEE Trans. Inf. Forensics Secur. 3 (2008) 74-90.
*
* The Wiener filter is used to remove JPEG artifacts from a PRNU pattern.
*
* To apply the complete filter:
* apply Fourier transform to the input image
* call computeSquaredMagnitudes() on the frequencies
* call computeVarianceEstimates() on the squared magnitudes
* call computeVarianceZeroMean() on the squared magnitudes
* call scaleWithVariances() scaling the frequencies using the local and global variance
* apply inverse Fourier transform
* normalize result by calling normalizeToReal()
*
* @author Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl>
* @version 0.1
*/
#ifndef block_size_x
#define block_size_x 32
#endif
#ifndef block_size_y
#define block_size_y 16
#endif
#ifndef reuse_computation
#define reuse_computation 1
#endif
//set the number and size of filters, also adjust max_border
#define FILTERS 4
#define FILTER_SIZES {3, 5, 7, 9}
#define MAX_BORDER 4 //the largest (filter size/2)
#define FLT_MAX 3.40282347e+38f
//function interfaces to prevent C++ garbling the kernel keys
extern "C" {
__global__ void computeSquaredMagnitudes(int h, int w, float* output, float* frequencies);
__global__ void scaleWithVariances(int h, int w, float* output, float* input, float* varianceEstimates, float* variance);
__global__ void toComplex(int h, int w, float* complex, float* input);
__global__ void toReal(int h, int w, float* output, float* complex);
__global__ void computeVarianceZeroMean(int n, float* output, float *input);
__global__ void computeVarianceEstimates(int h, int w, float* varest, float* input);
__global__ void computeVarianceEstimates_naive(int h, int w, float* varest, float* input);
__global__ void normalizeToReal(int h, int w, float* output, float* complex);
__global__ void normalize(int h, int w, float* output, float* complex);
__global__ void sumFloats(float *output, float *input, int n);
}
/**
* Computes the square of each frequency and stores the result as a real.
*/
//Per-pixel squared magnitude |re|^2 + |im|^2 of the interleaved complex
//input, stored as a real value in the h x w output array.
__global__ void computeSquaredMagnitudes(int h, int w, float* output, float* frequencies) {
int i = threadIdx.y + blockIdx.y * block_size_y;
int j = threadIdx.x + blockIdx.x * block_size_x;
if (j < w && i < h) {
//frequencies is interleaved complex: row stride 2*w, (re, im) pairs
float re = frequencies[i*2*w+(2 * j)];
float im = frequencies[i*2*w+(2 * j + 1)];
output[i*w+j] = (re * re) + (im * im);
}
}
/**
* This kernel scales the frequencies in input with a combination of the global variance and an estimate
* for the local variance at that position. Effectively this cleans the input pattern from low frequency
* noise.
*/
//Scales each complex frequency by var / max(var, localVarianceEstimate),
//attenuating frequencies whose local variance exceeds the global variance.
__global__ void scaleWithVariances(int h, int w, float* output, float* input, float* varianceEstimates, float* variance) {
int i = threadIdx.y + blockIdx.y * block_size_y;
int j = threadIdx.x + blockIdx.x * block_size_x;
//variance[0] holds the global variance computed by the reduction kernels
float var = variance[0];
if (j < w && i < h) {
//scale <= 1 by construction of the max()
float scale = var / max(var, varianceEstimates[i*w+j]);
output[i*2*w+(j * 2)] = input[i*2*w+(j*2)] * scale;
output[i*2*w+(j * 2 + 1)] = input[i*2*w+(j * 2 + 1)] * scale;
}
}
/**
* Simple helper kernel to convert an array of real values to an array of complex values
*/
//Expands an h x w real array into interleaved complex (re, im) pairs,
//with all imaginary parts set to zero.
__global__ void toComplex(int h, int w, float* complex, float* input) {
int i = threadIdx.y + blockIdx.y * block_size_y;
int j = threadIdx.x + blockIdx.x * block_size_x;
if (i < h && j < w) {
complex[i * w * 2 + 2 * j] = input[i * w + j];
complex[i * w * 2 + (2 * j + 1)] = 0.0f;
}
}
/**
* Simple helper kernel to convert a complex array to an array of real values
*/
//Collapses interleaved complex input to its real parts only
//(imaginary components are discarded).
__global__ void toReal(int h, int w, float* output, float* complex) {
int i = threadIdx.y + blockIdx.y * block_size_y;
int j = threadIdx.x + blockIdx.x * block_size_x;
if (i < h && j < w) {
output[i*w+j] = complex[i * w * 2 + 2 * j];
}
}
/**
 * Divides each element by the pixel count (w*h) and keeps only the real
 * component — the usual normalization step after an inverse FFT.
 */
__global__ void normalizeToReal(int h, int w, float* output, float* complex) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    if (row < h && col < w) {
        output[row * w + col] = complex[row * w * 2 + 2 * col] / (float)(w * h);
    }
}
/**
 * Divides both components of each complex element by the pixel count (w*h),
 * normalizing the result of an (inverse) FFT while keeping it complex.
 */
__global__ void normalize(int h, int w, float* complex_out, float* complex_in) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    if (row < h && col < w) {
        int idx = row * w * 2 + 2 * col;
        float npixels = (float)(w * h);
        complex_out[idx]     = complex_in[idx]     / npixels;
        complex_out[idx + 1] = complex_in[idx + 1] / npixels;
    }
}
/**
* computeVarianceEstimates uses a number of simple filters to compute a minimum local variance
*
* Instead of using multiple arrays with zeroed borders around them, the loading phase of this
* kernel writes a zero to shared memory instead of loading a border value from global memory.
* The filters can then be performed as normal on the data in shared memory. Because of this
* MAX_BORDER needs to be set accordingly.
*
*/
__global__ void computeVarianceEstimates(int h, int w, float* varest, float* input) {
    // Thread coordinates inside the block, and the block's top-left image
    // coordinate (per-thread i/j are i+ty / j+tx).
    int ty = threadIdx.y;
    int tx = threadIdx.x;
    int i = blockIdx.y * block_size_y;
    int j = blockIdx.x * block_size_x;
    // Tile of the input covering this block plus a MAX_BORDER halo on every
    // side, so all filter sizes up to 2*MAX_BORDER+1 can read from it.
    __shared__ float shinput[block_size_y+2*MAX_BORDER][block_size_x+2*MAX_BORDER];
    //the loading phase of the kernel, which writes 0.0f to shared memory if the index
    //is outside the input
    int yEnd = block_size_y+2*MAX_BORDER;
    int xEnd = block_size_x+2*MAX_BORDER;
    // Each thread strides over the halo tile so the whole tile gets filled
    // even though the tile is larger than the thread block.
    for (int y=ty; y < yEnd; y+= block_size_y) {
        for (int x=tx; x < xEnd; x+= block_size_x) {
            float in = 0.0f;
            int indexy = i+y-MAX_BORDER;
            int indexx = j+x-MAX_BORDER;
            if (indexy >= 0 && indexy < h) {
                if (indexx >= 0 && indexx < w) {
                    in = input[indexy*w+indexx];
                }
            }
            shinput[y][x] = in;
        }
    }
    // All tile writes must be visible before any thread starts filtering.
    __syncthreads();
    const int filter[FILTERS] = FILTER_SIZES;
    float res = FLT_MAX;
#if reuse_computation == 0
    //perform filtering without reusing the sum from smaller filters
    for (int f = 0; f < FILTERS; f++) {
        int filterSize = filter[f];
        // Offset centers the filter window on this thread's pixel in the tile.
        int offset = MAX_BORDER-(filterSize/2);
        //do a convolution
        float sum = 0.0f;
        for (int fi = 0; fi < filterSize; fi++) {
            for (int fj = 0; fj < filterSize; fj++) {
                sum += shinput[ty+fi+offset][tx+fj+offset];
            }
        }
        sum /= (float)(filterSize * filterSize);
        //store minimum
        res = sum < res ? sum : res;
    }
#elif reuse_computation == 1
    //perform filtering while reusing the sum from smaller filters
    //start from center pixel
    float sum = shinput[ty+MAX_BORDER][tx+MAX_BORDER];
    //add sides of the square filter to sum and store minimum average
    // Filter sizes must be increasing (see FILTER_SIZES): each iteration adds
    // only the outer ring of the next larger square to the running sum.
    for (int f = 0; f < FILTERS; f++) {
        int filterSize = filter[f];
        int offset = MAX_BORDER-(filterSize/2);
        //top and bottom row
        for (int fj=0; fj<filterSize; fj++) {
            sum += shinput[ty+0+offset][tx+fj+offset];
            sum += shinput[ty+filterSize-1+offset][tx+fj+offset];
        }
        //two sides (between top and bottom rows)
        for (int fi=1; fi<filterSize-1; fi++) {
            sum += shinput[ty+fi+offset][tx+0+offset];
            sum += shinput[ty+fi+offset][tx+filterSize-1+offset];
        }
        //store minimum
        float avg = sum / (filterSize*filterSize);
        res = avg < res ? avg : res;
    }
#endif
    //write output
    // Guard against partial blocks at the right/bottom image edge.
    if (i + ty < h) {
        if (j + tx < w) {
            varest[(i+ty)*w + (j+tx)] = res;
        }
    }
}
/**
* This method is a naive implementation of computeVarianceEstimates used for correctness checks
*/
__global__ void computeVarianceEstimates_naive(int h, int w, float* varest, float* input) {
    int i = threadIdx.y + blockIdx.y * block_size_y;
    int j = threadIdx.x + blockIdx.x * block_size_x;
    // Running minimum of the filter averages; FLT_MAX so the first average wins.
    float res = FLT_MAX;
    if (i < h && j < w) {
        const int filter[FILTERS] = FILTER_SIZES;
        // For each filter size, average the surrounding window (reading
        // directly from global memory) and keep the smallest average.
        for (int f = 0; f < FILTERS; f++) {
            int filterSize = filter[f];
            int border = filterSize/2;
            //do a convolution
            float sum = 0.0f;
            for (int fi = 0; fi < filterSize; fi++) {
                for (int fj = 0; fj < filterSize; fj++) {
                    //original
                    //sum += input[(i + fi)*(w+border*2)+(j + fj)];
                    int row = i+fi-border;
                    int col = j+fj-border;
                    //the following ifs are a hack to save redundant copying
                    // Out-of-image taps contribute 0, matching the zero-border
                    // behavior of the shared-memory version above.
                    if (row >= 0 && row < h) {
                        if (col >= 0 && col < w) {
                            sum += input[row*w + col];
                        }
                    }
                }
            }
            // Divide by the full window size even when taps fell outside.
            sum /= (float)(filterSize * filterSize);
            if (sum < res) {
                res = sum;
            }
        }
        //write output
        varest[i*w+j] = res;
    }
}
/*
* This method computes the variance of an input array, assuming the mean is equal to zero
*
* Thread block size should be power of two because of the reduction.
* The implementation currently assumes only one thread block is used for the entire input array
*
* In case of multiple thread blocks initialize output to zero and use atomic add or another kernel
*
* block_size_x power of 2
*/
#ifndef grid_size_x //hack to see if the Kernel Tuner is being used
#undef block_size_x
#define block_size_x 128
#endif
__global__ void computeVarianceZeroMean(int n, float *output, float *input) {
    // Global index, index within the block, and the grid-wide stride used by
    // the accumulation loop.
    int x = blockIdx.x * block_size_x + threadIdx.x;
    int ti = threadIdx.x;
    int step_size = block_size_x * gridDim.x;
    float sum = 0.0f;
    if (x < n) {
        //compute thread-local sums of squares
        for (int i=x; i < n; i+=step_size) {
            sum += input[i]*input[i];
        }
    }
    //store local sums in shared memory
    __shared__ float shmem[block_size_x];
    shmem[ti] = sum;
    __syncthreads();
    //reduce local sums
    // Tree reduction; relies on block_size_x being a power of two (see the
    // comment above this kernel).
    for (unsigned int s=block_size_x/2; s>0; s>>=1) {
        if (ti < s) {
            shmem[ti] += shmem[ti + s];
        }
        // Outside the divergent branch so every thread reaches the barrier.
        __syncthreads();
    }
    //write result
    // Bessel-corrected variance under the zero-mean assumption; requires n > 1.
    if (ti == 0) {
        output[blockIdx.x] = ( shmem[0] * n ) / ( n - 1 ); //in case of multiple threadblocks write back using atomicAdd
    }
}
/*
* Simple CUDA Helper function to reduce the output of a
* reduction kernel with multiple thread blocks to a single value
*
* This function performs a sum of an array of floats
*
* This function is to be called with only a single thread block
*/
__global__ void sumFloats(float *output, float *input, int n) {
    int ti = threadIdx.x;
    __shared__ float shmem[block_size_x];
    //compute thread-local sums
    // Each thread strides over the input; works for any n, including n that
    // is not a multiple of block_size_x.
    float sum = 0.0f;
    for (int i=ti; i < n; i+=block_size_x) {
        sum += input[i];
    }
    //store local sums in shared memory
    shmem[ti] = sum;
    __syncthreads();
    //reduce local sums
    // Power-of-two tree reduction; barrier sits outside the divergent branch.
    for (unsigned int s=block_size_x/2; s>0; s>>=1) {
        if (ti < s) {
            shmem[ti] += shmem[ti + s];
        }
        __syncthreads();
    }
    //write result
    if (ti == 0) {
        output[0] = shmem[0];
    }
}
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
/*
* Copyright 2015 Netherlands eScience Center, VU University Amsterdam, and Netherlands Forensic Institute
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file contains CUDA kernels for applying a Wiener filter to a
* PRNU pattern, as proposed by:
* M. Chen et al. "Determining image origin and integrity using sensor
* noise", IEEE Trans. Inf. Forensics Secur. 3 (2008) 74-90.
*
* The Wiener filter is used to remove JPEG artifacts from a PRNU pattern.
*
* To apply the complete filter:
* apply Fourier transform to the input image
* call computeSquaredMagnitudes() on the frequencies
* call computeVarianceEstimates() on the squared magnitudes
* call computeVarianceZeroMean() on the squared magnitudes
* call scaleWithVariances() scaling the frequencies using the local and global variance
* apply inverse Fourier transform
* normalize result by calling normalizeToReal()
*
* @author Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl>
* @version 0.1
*/
#ifndef block_size_x
#define block_size_x 32
#endif
#ifndef block_size_y
#define block_size_y 16
#endif
#ifndef reuse_computation
#define reuse_computation 1
#endif
//set the number and size of filters, also adjust max_border
#define FILTERS 4
#define FILTER_SIZES {3, 5, 7, 9}
#define MAX_BORDER 4 //the largest (filter size/2)
#define FLT_MAX 3.40282347e+38f
//function interfaces to prevent C++ garbling the kernel keys
extern "C" {
__global__ void computeSquaredMagnitudes(int h, int w, float* output, float* frequencies);
__global__ void scaleWithVariances(int h, int w, float* output, float* input, float* varianceEstimates, float* variance);
__global__ void toComplex(int h, int w, float* complex, float* input);
__global__ void toReal(int h, int w, float* output, float* complex);
__global__ void computeVarianceZeroMean(int n, float* output, float *input);
__global__ void computeVarianceEstimates(int h, int w, float* varest, float* input);
__global__ void computeVarianceEstimates_naive(int h, int w, float* varest, float* input);
__global__ void normalizeToReal(int h, int w, float* output, float* complex);
__global__ void normalize(int h, int w, float* output, float* complex);
__global__ void sumFloats(float *output, float *input, int n);
}
/**
 * Stores the squared magnitude of each complex frequency as a real value.
 *
 * frequencies holds interleaved (re, im) pairs, so each image row occupies
 * 2*w floats; output is a plain h*w real array.
 */
__global__ void computeSquaredMagnitudes(int h, int w, float* output, float* frequencies) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    if (row < h && col < w) {
        int src = row * 2 * w + 2 * col;
        float re_part = frequencies[src];
        float im_part = frequencies[src + 1];
        output[row * w + col] = re_part * re_part + im_part * im_part;
    }
}
/**
 * Scales each complex frequency by the ratio of the global variance to the
 * local variance estimate at that position, suppressing low-frequency noise
 * in the input pattern.
 *
 * variance points at a single float (the global variance); input/output are
 * interleaved complex arrays of row width 2*w.
 */
__global__ void scaleWithVariances(int h, int w, float* output, float* input, float* varianceEstimates, float* variance) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    // All threads read the same scalar global variance.
    float globalVar = variance[0];
    if (col < w && row < h) {
        // Scale is <= 1: the local estimate can only attenuate, never amplify.
        float factor = globalVar / max(globalVar, varianceEstimates[row * w + col]);
        int re = row * 2 * w + 2 * col;
        output[re]     = input[re]     * factor;
        output[re + 1] = input[re + 1] * factor;
    }
}
/**
 * Converts a real h*w array into an interleaved complex array, setting every
 * imaginary component to zero.
 */
__global__ void toComplex(int h, int w, float* complex, float* input) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    if (row < h && col < w) {
        int dst = row * w * 2 + 2 * col;
        complex[dst]     = input[row * w + col];
        complex[dst + 1] = 0.0f;
    }
}
/**
 * Extracts the real components of an interleaved complex array into a plain
 * h*w real array; imaginary components are discarded.
 */
__global__ void toReal(int h, int w, float* output, float* complex) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    if (row < h && col < w) {
        output[row * w + col] = complex[row * w * 2 + 2 * col];
    }
}
/**
 * Divides each element by the pixel count (w*h) and keeps only the real
 * component — the usual normalization step after an inverse FFT.
 */
__global__ void normalizeToReal(int h, int w, float* output, float* complex) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    if (row < h && col < w) {
        output[row * w + col] = complex[row * w * 2 + 2 * col] / (float)(w * h);
    }
}
/**
 * Divides both components of each complex element by the pixel count (w*h),
 * normalizing the result of an (inverse) FFT while keeping it complex.
 */
__global__ void normalize(int h, int w, float* complex_out, float* complex_in) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    if (row < h && col < w) {
        int idx = row * w * 2 + 2 * col;
        float npixels = (float)(w * h);
        complex_out[idx]     = complex_in[idx]     / npixels;
        complex_out[idx + 1] = complex_in[idx + 1] / npixels;
    }
}
/**
* computeVarianceEstimates uses a number of simple filters to compute a minimum local variance
*
* Instead of using multiple arrays with zeroed borders around them, the loading phase of this
* kernel writes a zero to shared memory instead of loading a border value from global memory.
* The filters can then be performed as normal on the data in shared memory. Because of this
* MAX_BORDER needs to be set accordingly.
*
*/
__global__ void computeVarianceEstimates(int h, int w, float* varest, float* input) {
    // Thread coordinates inside the block, and the block's top-left image
    // coordinate (per-thread i/j are i+ty / j+tx).
    int ty = threadIdx.y;
    int tx = threadIdx.x;
    int i = blockIdx.y * block_size_y;
    int j = blockIdx.x * block_size_x;
    // Tile of the input covering this block plus a MAX_BORDER halo on every
    // side, so all filter sizes up to 2*MAX_BORDER+1 can read from it.
    __shared__ float shinput[block_size_y+2*MAX_BORDER][block_size_x+2*MAX_BORDER];
    //the loading phase of the kernel, which writes 0.0f to shared memory if the index
    //is outside the input
    int yEnd = block_size_y+2*MAX_BORDER;
    int xEnd = block_size_x+2*MAX_BORDER;
    // Each thread strides over the halo tile so the whole tile gets filled
    // even though the tile is larger than the thread block.
    for (int y=ty; y < yEnd; y+= block_size_y) {
        for (int x=tx; x < xEnd; x+= block_size_x) {
            float in = 0.0f;
            int indexy = i+y-MAX_BORDER;
            int indexx = j+x-MAX_BORDER;
            if (indexy >= 0 && indexy < h) {
                if (indexx >= 0 && indexx < w) {
                    in = input[indexy*w+indexx];
                }
            }
            shinput[y][x] = in;
        }
    }
    // All tile writes must be visible before any thread starts filtering.
    __syncthreads();
    const int filter[FILTERS] = FILTER_SIZES;
    float res = FLT_MAX;
#if reuse_computation == 0
    //perform filtering without reusing the sum from smaller filters
    for (int f = 0; f < FILTERS; f++) {
        int filterSize = filter[f];
        // Offset centers the filter window on this thread's pixel in the tile.
        int offset = MAX_BORDER-(filterSize/2);
        //do a convolution
        float sum = 0.0f;
        for (int fi = 0; fi < filterSize; fi++) {
            for (int fj = 0; fj < filterSize; fj++) {
                sum += shinput[ty+fi+offset][tx+fj+offset];
            }
        }
        sum /= (float)(filterSize * filterSize);
        //store minimum
        res = sum < res ? sum : res;
    }
#elif reuse_computation == 1
    //perform filtering while reusing the sum from smaller filters
    //start from center pixel
    float sum = shinput[ty+MAX_BORDER][tx+MAX_BORDER];
    //add sides of the square filter to sum and store minimum average
    // Filter sizes must be increasing (see FILTER_SIZES): each iteration adds
    // only the outer ring of the next larger square to the running sum.
    for (int f = 0; f < FILTERS; f++) {
        int filterSize = filter[f];
        int offset = MAX_BORDER-(filterSize/2);
        //top and bottom row
        for (int fj=0; fj<filterSize; fj++) {
            sum += shinput[ty+0+offset][tx+fj+offset];
            sum += shinput[ty+filterSize-1+offset][tx+fj+offset];
        }
        //two sides (between top and bottom rows)
        for (int fi=1; fi<filterSize-1; fi++) {
            sum += shinput[ty+fi+offset][tx+0+offset];
            sum += shinput[ty+fi+offset][tx+filterSize-1+offset];
        }
        //store minimum
        float avg = sum / (filterSize*filterSize);
        res = avg < res ? avg : res;
    }
#endif
    //write output
    // Guard against partial blocks at the right/bottom image edge.
    if (i + ty < h) {
        if (j + tx < w) {
            varest[(i+ty)*w + (j+tx)] = res;
        }
    }
}
/**
* This method is a naive implementation of computeVarianceEstimates used for correctness checks
*/
__global__ void computeVarianceEstimates_naive(int h, int w, float* varest, float* input) {
    int i = threadIdx.y + blockIdx.y * block_size_y;
    int j = threadIdx.x + blockIdx.x * block_size_x;
    // Running minimum of the filter averages; FLT_MAX so the first average wins.
    float res = FLT_MAX;
    if (i < h && j < w) {
        const int filter[FILTERS] = FILTER_SIZES;
        // For each filter size, average the surrounding window (reading
        // directly from global memory) and keep the smallest average.
        for (int f = 0; f < FILTERS; f++) {
            int filterSize = filter[f];
            int border = filterSize/2;
            //do a convolution
            float sum = 0.0f;
            for (int fi = 0; fi < filterSize; fi++) {
                for (int fj = 0; fj < filterSize; fj++) {
                    //original
                    //sum += input[(i + fi)*(w+border*2)+(j + fj)];
                    int row = i+fi-border;
                    int col = j+fj-border;
                    //the following ifs are a hack to save redundant copying
                    // Out-of-image taps contribute 0, matching the zero-border
                    // behavior of the shared-memory version above.
                    if (row >= 0 && row < h) {
                        if (col >= 0 && col < w) {
                            sum += input[row*w + col];
                        }
                    }
                }
            }
            // Divide by the full window size even when taps fell outside.
            sum /= (float)(filterSize * filterSize);
            if (sum < res) {
                res = sum;
            }
        }
        //write output
        varest[i*w+j] = res;
    }
}
/*
* This method computes the variance of an input array, assuming the mean is equal to zero
*
* Thread block size should be power of two because of the reduction.
* The implementation currently assumes only one thread block is used for the entire input array
*
* In case of multiple thread blocks initialize output to zero and use atomic add or another kernel
*
* block_size_x power of 2
*/
#ifndef grid_size_x //hack to see if the Kernel Tuner is being used
#undef block_size_x
#define block_size_x 128
#endif
__global__ void computeVarianceZeroMean(int n, float *output, float *input) {
    // Global index, index within the block, and the grid-wide stride used by
    // the accumulation loop.
    int x = blockIdx.x * block_size_x + threadIdx.x;
    int ti = threadIdx.x;
    int step_size = block_size_x * gridDim.x;
    float sum = 0.0f;
    if (x < n) {
        //compute thread-local sums of squares
        for (int i=x; i < n; i+=step_size) {
            sum += input[i]*input[i];
        }
    }
    //store local sums in shared memory
    __shared__ float shmem[block_size_x];
    shmem[ti] = sum;
    __syncthreads();
    //reduce local sums
    // Tree reduction; relies on block_size_x being a power of two (see the
    // comment above this kernel).
    for (unsigned int s=block_size_x/2; s>0; s>>=1) {
        if (ti < s) {
            shmem[ti] += shmem[ti + s];
        }
        // Outside the divergent branch so every thread reaches the barrier.
        __syncthreads();
    }
    //write result
    // Bessel-corrected variance under the zero-mean assumption; requires n > 1.
    if (ti == 0) {
        output[blockIdx.x] = ( shmem[0] * n ) / ( n - 1 ); //in case of multiple threadblocks write back using atomicAdd
    }
}
/*
* Simple CUDA Helper function to reduce the output of a
* reduction kernel with multiple thread blocks to a single value
*
* This function performs a sum of an array of floats
*
* This function is to be called with only a single thread block
*/
__global__ void sumFloats(float *output, float *input, int n) {
    int ti = threadIdx.x;
    __shared__ float shmem[block_size_x];
    //compute thread-local sums
    // Each thread strides over the input; works for any n, including n that
    // is not a multiple of block_size_x.
    float sum = 0.0f;
    for (int i=ti; i < n; i+=block_size_x) {
        sum += input[i];
    }
    //store local sums in shared memory
    shmem[ti] = sum;
    __syncthreads();
    //reduce local sums
    // Power-of-two tree reduction; barrier sits outside the divergent branch.
    for (unsigned int s=block_size_x/2; s>0; s>>=1) {
        if (ti < s) {
            shmem[ti] += shmem[ti + s];
        }
        __syncthreads();
    }
    //write result
    if (ti == 0) {
        output[0] = shmem[0];
    }
}
.file "wienerfilter.hip"
.globl __device_stub__computeSquaredMagnitudes # -- Begin function __device_stub__computeSquaredMagnitudes
.p2align 4, 0x90
.type __device_stub__computeSquaredMagnitudes,@function
__device_stub__computeSquaredMagnitudes: # @__device_stub__computeSquaredMagnitudes
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $computeSquaredMagnitudes, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size __device_stub__computeSquaredMagnitudes, .Lfunc_end0-__device_stub__computeSquaredMagnitudes
.cfi_endproc
# -- End function
.globl __device_stub__scaleWithVariances # -- Begin function __device_stub__scaleWithVariances
.p2align 4, 0x90
.type __device_stub__scaleWithVariances,@function
__device_stub__scaleWithVariances: # @__device_stub__scaleWithVariances
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movq %r9, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 64(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $scaleWithVariances, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end1:
.size __device_stub__scaleWithVariances, .Lfunc_end1-__device_stub__scaleWithVariances
.cfi_endproc
# -- End function
.globl __device_stub__toComplex # -- Begin function __device_stub__toComplex
.p2align 4, 0x90
.type __device_stub__toComplex,@function
__device_stub__toComplex: # @__device_stub__toComplex
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $toComplex, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size __device_stub__toComplex, .Lfunc_end2-__device_stub__toComplex
.cfi_endproc
# -- End function
.globl __device_stub__toReal # -- Begin function __device_stub__toReal
.p2align 4, 0x90
.type __device_stub__toReal,@function
__device_stub__toReal: # @__device_stub__toReal
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $toReal, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end3:
.size __device_stub__toReal, .Lfunc_end3-__device_stub__toReal
.cfi_endproc
# -- End function
.globl __device_stub__normalizeToReal # -- Begin function __device_stub__normalizeToReal
.p2align 4, 0x90
.type __device_stub__normalizeToReal,@function
__device_stub__normalizeToReal: # @__device_stub__normalizeToReal
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $normalizeToReal, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end4:
.size __device_stub__normalizeToReal, .Lfunc_end4-__device_stub__normalizeToReal
.cfi_endproc
# -- End function
.globl __device_stub__normalize # -- Begin function __device_stub__normalize
.p2align 4, 0x90
.type __device_stub__normalize,@function
__device_stub__normalize: # @__device_stub__normalize
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $normalize, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end5:
.size __device_stub__normalize, .Lfunc_end5-__device_stub__normalize
.cfi_endproc
# -- End function
.globl __device_stub__computeVarianceEstimates # -- Begin function __device_stub__computeVarianceEstimates
.p2align 4, 0x90
.type __device_stub__computeVarianceEstimates,@function
__device_stub__computeVarianceEstimates: # @__device_stub__computeVarianceEstimates
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $computeVarianceEstimates, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end6:
.size __device_stub__computeVarianceEstimates, .Lfunc_end6-__device_stub__computeVarianceEstimates
.cfi_endproc
# -- End function
.globl __device_stub__computeVarianceEstimates_naive # -- Begin function __device_stub__computeVarianceEstimates_naive
.p2align 4, 0x90
.type __device_stub__computeVarianceEstimates_naive,@function
__device_stub__computeVarianceEstimates_naive: # @__device_stub__computeVarianceEstimates_naive
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $computeVarianceEstimates_naive, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end7:
.size __device_stub__computeVarianceEstimates_naive, .Lfunc_end7-__device_stub__computeVarianceEstimates_naive
.cfi_endproc
# -- End function
.globl __device_stub__computeVarianceZeroMean # -- Begin function __device_stub__computeVarianceZeroMean
.p2align 4, 0x90
.type __device_stub__computeVarianceZeroMean,@function
__device_stub__computeVarianceZeroMean: # @__device_stub__computeVarianceZeroMean
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $computeVarianceZeroMean, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end8:
.size __device_stub__computeVarianceZeroMean, .Lfunc_end8-__device_stub__computeVarianceZeroMean
.cfi_endproc
# -- End function
.globl __device_stub__sumFloats # -- Begin function __device_stub__sumFloats
.p2align 4, 0x90
.type __device_stub__sumFloats,@function
__device_stub__sumFloats: # @__device_stub__sumFloats
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $sumFloats, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end9:
.size __device_stub__sumFloats, .Lfunc_end9-__device_stub__sumFloats
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB10_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB10_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $computeSquaredMagnitudes, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $scaleWithVariances, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $toComplex, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $toReal, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $normalizeToReal, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $normalize, %esi
movl $.L__unnamed_6, %edx
movl $.L__unnamed_6, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $computeVarianceEstimates, %esi
movl $.L__unnamed_7, %edx
movl $.L__unnamed_7, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $computeVarianceEstimates_naive, %esi
movl $.L__unnamed_8, %edx
movl $.L__unnamed_8, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $computeVarianceZeroMean, %esi
movl $.L__unnamed_9, %edx
movl $.L__unnamed_9, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $sumFloats, %esi
movl $.L__unnamed_10, %edx
movl $.L__unnamed_10, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end10:
.size __hip_module_ctor, .Lfunc_end10-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB11_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB11_2:
retq
.Lfunc_end11:
.size __hip_module_dtor, .Lfunc_end11-__hip_module_dtor
.cfi_endproc
# -- End function
.type computeSquaredMagnitudes,@object # @computeSquaredMagnitudes
.section .rodata,"a",@progbits
.globl computeSquaredMagnitudes
.p2align 3, 0x0
computeSquaredMagnitudes:
.quad __device_stub__computeSquaredMagnitudes
.size computeSquaredMagnitudes, 8
.type scaleWithVariances,@object # @scaleWithVariances
.globl scaleWithVariances
.p2align 3, 0x0
scaleWithVariances:
.quad __device_stub__scaleWithVariances
.size scaleWithVariances, 8
.type toComplex,@object # @toComplex
.globl toComplex
.p2align 3, 0x0
toComplex:
.quad __device_stub__toComplex
.size toComplex, 8
.type toReal,@object # @toReal
.globl toReal
.p2align 3, 0x0
toReal:
.quad __device_stub__toReal
.size toReal, 8
.type normalizeToReal,@object # @normalizeToReal
.globl normalizeToReal
.p2align 3, 0x0
normalizeToReal:
.quad __device_stub__normalizeToReal
.size normalizeToReal, 8
.type normalize,@object # @normalize
.globl normalize
.p2align 3, 0x0
normalize:
.quad __device_stub__normalize
.size normalize, 8
.type computeVarianceEstimates,@object # @computeVarianceEstimates
.globl computeVarianceEstimates
.p2align 3, 0x0
computeVarianceEstimates:
.quad __device_stub__computeVarianceEstimates
.size computeVarianceEstimates, 8
.type computeVarianceEstimates_naive,@object # @computeVarianceEstimates_naive
.globl computeVarianceEstimates_naive
.p2align 3, 0x0
computeVarianceEstimates_naive:
.quad __device_stub__computeVarianceEstimates_naive
.size computeVarianceEstimates_naive, 8
.type computeVarianceZeroMean,@object # @computeVarianceZeroMean
.globl computeVarianceZeroMean
.p2align 3, 0x0
computeVarianceZeroMean:
.quad __device_stub__computeVarianceZeroMean
.size computeVarianceZeroMean, 8
.type sumFloats,@object # @sumFloats
.globl sumFloats
.p2align 3, 0x0
sumFloats:
.quad __device_stub__sumFloats
.size sumFloats, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "computeSquaredMagnitudes"
.size .L__unnamed_1, 25
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "scaleWithVariances"
.size .L__unnamed_2, 19
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "toComplex"
.size .L__unnamed_3, 10
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "toReal"
.size .L__unnamed_4, 7
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "normalizeToReal"
.size .L__unnamed_5, 16
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "normalize"
.size .L__unnamed_6, 10
.type .L__unnamed_7,@object # @6
.L__unnamed_7:
.asciz "computeVarianceEstimates"
.size .L__unnamed_7, 25
.type .L__unnamed_8,@object # @7
.L__unnamed_8:
.asciz "computeVarianceEstimates_naive"
.size .L__unnamed_8, 31
.type .L__unnamed_9,@object # @8
.L__unnamed_9:
.asciz "computeVarianceZeroMean"
.size .L__unnamed_9, 24
.type .L__unnamed_10,@object # @9
.L__unnamed_10:
.asciz "sumFloats"
.size .L__unnamed_10, 10
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__computeSquaredMagnitudes
.addrsig_sym __device_stub__scaleWithVariances
.addrsig_sym __device_stub__toComplex
.addrsig_sym __device_stub__toReal
.addrsig_sym __device_stub__normalizeToReal
.addrsig_sym __device_stub__normalize
.addrsig_sym __device_stub__computeVarianceEstimates
.addrsig_sym __device_stub__computeVarianceEstimates_naive
.addrsig_sym __device_stub__computeVarianceZeroMean
.addrsig_sym __device_stub__sumFloats
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym computeSquaredMagnitudes
.addrsig_sym scaleWithVariances
.addrsig_sym toComplex
.addrsig_sym toReal
.addrsig_sym normalizeToReal
.addrsig_sym normalize
.addrsig_sym computeVarianceEstimates
.addrsig_sym computeVarianceEstimates_naive
.addrsig_sym computeVarianceZeroMean
.addrsig_sym sumFloats
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001bdbf1_00000000-6_wienerfilter.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z48__device_stub__Z24computeSquaredMagnitudesiiPfS_iiPfS_
.type _Z48__device_stub__Z24computeSquaredMagnitudesiiPfS_iiPfS_, @function
_Z48__device_stub__Z24computeSquaredMagnitudesiiPfS_iiPfS_:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq computeSquaredMagnitudes(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z48__device_stub__Z24computeSquaredMagnitudesiiPfS_iiPfS_, .-_Z48__device_stub__Z24computeSquaredMagnitudesiiPfS_iiPfS_
.globl computeSquaredMagnitudes
.type computeSquaredMagnitudes, @function
computeSquaredMagnitudes:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z48__device_stub__Z24computeSquaredMagnitudesiiPfS_iiPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size computeSquaredMagnitudes, .-computeSquaredMagnitudes
.globl _Z46__device_stub__Z18scaleWithVariancesiiPfS_S_S_iiPfS_S_S_
.type _Z46__device_stub__Z18scaleWithVariancesiiPfS_S_S_iiPfS_S_S_, @function
_Z46__device_stub__Z18scaleWithVariancesiiPfS_S_S_iiPfS_S_S_:
.LFB2053:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 44(%rsp)
movl %esi, 40(%rsp)
movq %rdx, 32(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 16(%rsp)
movq %r9, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 120(%rsp)
leaq 32(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq scaleWithVariances(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z46__device_stub__Z18scaleWithVariancesiiPfS_S_S_iiPfS_S_S_, .-_Z46__device_stub__Z18scaleWithVariancesiiPfS_S_S_iiPfS_S_S_
.globl scaleWithVariances
.type scaleWithVariances, @function
scaleWithVariances:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z18scaleWithVariancesiiPfS_S_S_iiPfS_S_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size scaleWithVariances, .-scaleWithVariances
.globl _Z32__device_stub__Z9toComplexiiPfS_iiPfS_
.type _Z32__device_stub__Z9toComplexiiPfS_iiPfS_, @function
_Z32__device_stub__Z9toComplexiiPfS_iiPfS_:
.LFB2055:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq toComplex(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2055:
.size _Z32__device_stub__Z9toComplexiiPfS_iiPfS_, .-_Z32__device_stub__Z9toComplexiiPfS_iiPfS_
.globl toComplex
.type toComplex, @function
toComplex:
.LFB2056:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9toComplexiiPfS_iiPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size toComplex, .-toComplex
.globl _Z29__device_stub__Z6toRealiiPfS_iiPfS_
.type _Z29__device_stub__Z6toRealiiPfS_iiPfS_, @function
_Z29__device_stub__Z6toRealiiPfS_iiPfS_:
.LFB2057:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq toReal(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z29__device_stub__Z6toRealiiPfS_iiPfS_, .-_Z29__device_stub__Z6toRealiiPfS_iiPfS_
.globl toReal
.type toReal, @function
toReal:
.LFB2058:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z6toRealiiPfS_iiPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size toReal, .-toReal
.globl _Z39__device_stub__Z15normalizeToRealiiPfS_iiPfS_
.type _Z39__device_stub__Z15normalizeToRealiiPfS_iiPfS_, @function
_Z39__device_stub__Z15normalizeToRealiiPfS_iiPfS_:
.LFB2059:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L39
.L35:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq normalizeToReal(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L35
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size _Z39__device_stub__Z15normalizeToRealiiPfS_iiPfS_, .-_Z39__device_stub__Z15normalizeToRealiiPfS_iiPfS_
.globl normalizeToReal
.type normalizeToReal, @function
normalizeToReal:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z15normalizeToRealiiPfS_iiPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size normalizeToReal, .-normalizeToReal
.globl _Z32__device_stub__Z9normalizeiiPfS_iiPfS_
.type _Z32__device_stub__Z9normalizeiiPfS_iiPfS_, @function
_Z32__device_stub__Z9normalizeiiPfS_iiPfS_:
.LFB2061:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L47
.L43:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L48
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L47:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq normalize(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L43
.L48:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2061:
.size _Z32__device_stub__Z9normalizeiiPfS_iiPfS_, .-_Z32__device_stub__Z9normalizeiiPfS_iiPfS_
.globl normalize
.type normalize, @function
normalize:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z9normalizeiiPfS_iiPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size normalize, .-normalize
.globl _Z48__device_stub__Z24computeVarianceEstimatesiiPfS_iiPfS_
.type _Z48__device_stub__Z24computeVarianceEstimatesiiPfS_iiPfS_, @function
_Z48__device_stub__Z24computeVarianceEstimatesiiPfS_iiPfS_:
.LFB2063:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L55
.L51:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L56
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L55:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq computeVarianceEstimates(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L51
.L56:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2063:
.size _Z48__device_stub__Z24computeVarianceEstimatesiiPfS_iiPfS_, .-_Z48__device_stub__Z24computeVarianceEstimatesiiPfS_iiPfS_
.globl computeVarianceEstimates
.type computeVarianceEstimates, @function
computeVarianceEstimates:
.LFB2064:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z48__device_stub__Z24computeVarianceEstimatesiiPfS_iiPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size computeVarianceEstimates, .-computeVarianceEstimates
.globl _Z54__device_stub__Z30computeVarianceEstimates_naiveiiPfS_iiPfS_
.type _Z54__device_stub__Z30computeVarianceEstimates_naiveiiPfS_iiPfS_, @function
_Z54__device_stub__Z30computeVarianceEstimates_naiveiiPfS_iiPfS_:
.LFB2065:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L63
.L59:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L64
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L63:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq computeVarianceEstimates_naive(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L59
.L64:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2065:
.size _Z54__device_stub__Z30computeVarianceEstimates_naiveiiPfS_iiPfS_, .-_Z54__device_stub__Z30computeVarianceEstimates_naiveiiPfS_iiPfS_
.globl computeVarianceEstimates_naive
.type computeVarianceEstimates_naive, @function
computeVarianceEstimates_naive:
.LFB2066:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z54__device_stub__Z30computeVarianceEstimates_naiveiiPfS_iiPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2066:
.size computeVarianceEstimates_naive, .-computeVarianceEstimates_naive
.globl _Z46__device_stub__Z23computeVarianceZeroMeaniPfS_iPfS_
.type _Z46__device_stub__Z23computeVarianceZeroMeaniPfS_iPfS_, @function
_Z46__device_stub__Z23computeVarianceZeroMeaniPfS_iPfS_:
.LFB2067:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L71
.L67:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L72
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L71:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq computeVarianceZeroMean(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L67
.L72:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2067:
.size _Z46__device_stub__Z23computeVarianceZeroMeaniPfS_iPfS_, .-_Z46__device_stub__Z23computeVarianceZeroMeaniPfS_iPfS_
.globl computeVarianceZeroMean
.type computeVarianceZeroMean, @function
computeVarianceZeroMean:
.LFB2068:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z23computeVarianceZeroMeaniPfS_iPfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2068:
.size computeVarianceZeroMean, .-computeVarianceZeroMean
.globl _Z31__device_stub__Z9sumFloatsPfS_iPfS_i
.type _Z31__device_stub__Z9sumFloatsPfS_iPfS_i, @function
_Z31__device_stub__Z9sumFloatsPfS_iPfS_i:
.LFB2069:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L79
.L75:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L80
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L79:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq sumFloats(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L75
.L80:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2069:
.size _Z31__device_stub__Z9sumFloatsPfS_iPfS_i, .-_Z31__device_stub__Z9sumFloatsPfS_iPfS_i
.globl sumFloats
.type sumFloats, @function
sumFloats:
.LFB2070:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z9sumFloatsPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2070:
.size sumFloats, .-sumFloats
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "sumFloats"
.LC1:
.string "computeVarianceZeroMean"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "computeVarianceEstimates_naive"
.section .rodata.str1.1
.LC3:
.string "computeVarianceEstimates"
.LC4:
.string "normalize"
.LC5:
.string "normalizeToReal"
.LC6:
.string "toReal"
.LC7:
.string "toComplex"
.LC8:
.string "scaleWithVariances"
.LC9:
.string "computeSquaredMagnitudes"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2072:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq sumFloats(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq computeVarianceZeroMean(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq computeVarianceEstimates_naive(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq computeVarianceEstimates(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq normalize(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq normalizeToReal(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq toReal(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq toComplex(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq scaleWithVariances(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq computeSquaredMagnitudes(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2072:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "wienerfilter.hip"
.globl __device_stub__computeSquaredMagnitudes # -- Begin function __device_stub__computeSquaredMagnitudes
.p2align 4, 0x90
.type __device_stub__computeSquaredMagnitudes,@function
__device_stub__computeSquaredMagnitudes: # @__device_stub__computeSquaredMagnitudes
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $computeSquaredMagnitudes, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size __device_stub__computeSquaredMagnitudes, .Lfunc_end0-__device_stub__computeSquaredMagnitudes
.cfi_endproc
# -- End function
.globl __device_stub__scaleWithVariances # -- Begin function __device_stub__scaleWithVariances
.p2align 4, 0x90
.type __device_stub__scaleWithVariances,@function
__device_stub__scaleWithVariances: # @__device_stub__scaleWithVariances
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movq %r9, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 64(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $scaleWithVariances, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end1:
.size __device_stub__scaleWithVariances, .Lfunc_end1-__device_stub__scaleWithVariances
.cfi_endproc
# -- End function
.globl __device_stub__toComplex # -- Begin function __device_stub__toComplex
.p2align 4, 0x90
.type __device_stub__toComplex,@function
__device_stub__toComplex: # @__device_stub__toComplex
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $toComplex, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size __device_stub__toComplex, .Lfunc_end2-__device_stub__toComplex
.cfi_endproc
# -- End function
.globl __device_stub__toReal # -- Begin function __device_stub__toReal
.p2align 4, 0x90
.type __device_stub__toReal,@function
__device_stub__toReal: # @__device_stub__toReal
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $toReal, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end3:
.size __device_stub__toReal, .Lfunc_end3-__device_stub__toReal
.cfi_endproc
# -- End function
.globl __device_stub__normalizeToReal # -- Begin function __device_stub__normalizeToReal
.p2align 4, 0x90
.type __device_stub__normalizeToReal,@function
__device_stub__normalizeToReal: # @__device_stub__normalizeToReal
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $normalizeToReal, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end4:
.size __device_stub__normalizeToReal, .Lfunc_end4-__device_stub__normalizeToReal
.cfi_endproc
# -- End function
.globl __device_stub__normalize # -- Begin function __device_stub__normalize
.p2align 4, 0x90
.type __device_stub__normalize,@function
__device_stub__normalize: # @__device_stub__normalize
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $normalize, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end5:
.size __device_stub__normalize, .Lfunc_end5-__device_stub__normalize
.cfi_endproc
# -- End function
.globl __device_stub__computeVarianceEstimates # -- Begin function __device_stub__computeVarianceEstimates
.p2align 4, 0x90
.type __device_stub__computeVarianceEstimates,@function
__device_stub__computeVarianceEstimates: # @__device_stub__computeVarianceEstimates
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $computeVarianceEstimates, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end6:
.size __device_stub__computeVarianceEstimates, .Lfunc_end6-__device_stub__computeVarianceEstimates
.cfi_endproc
# -- End function
.globl __device_stub__computeVarianceEstimates_naive # -- Begin function __device_stub__computeVarianceEstimates_naive
.p2align 4, 0x90
.type __device_stub__computeVarianceEstimates_naive,@function
__device_stub__computeVarianceEstimates_naive: # @__device_stub__computeVarianceEstimates_naive
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $computeVarianceEstimates_naive, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end7:
.size __device_stub__computeVarianceEstimates_naive, .Lfunc_end7-__device_stub__computeVarianceEstimates_naive
.cfi_endproc
# -- End function
.globl __device_stub__computeVarianceZeroMean # -- Begin function __device_stub__computeVarianceZeroMean
.p2align 4, 0x90
.type __device_stub__computeVarianceZeroMean,@function
__device_stub__computeVarianceZeroMean: # @__device_stub__computeVarianceZeroMean
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $computeVarianceZeroMean, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end8:
.size __device_stub__computeVarianceZeroMean, .Lfunc_end8-__device_stub__computeVarianceZeroMean
.cfi_endproc
# -- End function
.globl __device_stub__sumFloats # -- Begin function __device_stub__sumFloats
.p2align 4, 0x90
.type __device_stub__sumFloats,@function
__device_stub__sumFloats: # @__device_stub__sumFloats
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $sumFloats, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end9:
.size __device_stub__sumFloats, .Lfunc_end9-__device_stub__sumFloats
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB10_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB10_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $computeSquaredMagnitudes, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $scaleWithVariances, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $toComplex, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $toReal, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $normalizeToReal, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $normalize, %esi
movl $.L__unnamed_6, %edx
movl $.L__unnamed_6, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $computeVarianceEstimates, %esi
movl $.L__unnamed_7, %edx
movl $.L__unnamed_7, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $computeVarianceEstimates_naive, %esi
movl $.L__unnamed_8, %edx
movl $.L__unnamed_8, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $computeVarianceZeroMean, %esi
movl $.L__unnamed_9, %edx
movl $.L__unnamed_9, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $sumFloats, %esi
movl $.L__unnamed_10, %edx
movl $.L__unnamed_10, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end10:
.size __hip_module_ctor, .Lfunc_end10-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB11_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB11_2:
retq
.Lfunc_end11:
.size __hip_module_dtor, .Lfunc_end11-__hip_module_dtor
.cfi_endproc
# -- End function
.type computeSquaredMagnitudes,@object # @computeSquaredMagnitudes
.section .rodata,"a",@progbits
.globl computeSquaredMagnitudes
.p2align 3, 0x0
computeSquaredMagnitudes:
.quad __device_stub__computeSquaredMagnitudes
.size computeSquaredMagnitudes, 8
.type scaleWithVariances,@object # @scaleWithVariances
.globl scaleWithVariances
.p2align 3, 0x0
scaleWithVariances:
.quad __device_stub__scaleWithVariances
.size scaleWithVariances, 8
.type toComplex,@object # @toComplex
.globl toComplex
.p2align 3, 0x0
toComplex:
.quad __device_stub__toComplex
.size toComplex, 8
.type toReal,@object # @toReal
.globl toReal
.p2align 3, 0x0
toReal:
.quad __device_stub__toReal
.size toReal, 8
.type normalizeToReal,@object # @normalizeToReal
.globl normalizeToReal
.p2align 3, 0x0
normalizeToReal:
.quad __device_stub__normalizeToReal
.size normalizeToReal, 8
.type normalize,@object # @normalize
.globl normalize
.p2align 3, 0x0
normalize:
.quad __device_stub__normalize
.size normalize, 8
.type computeVarianceEstimates,@object # @computeVarianceEstimates
.globl computeVarianceEstimates
.p2align 3, 0x0
computeVarianceEstimates:
.quad __device_stub__computeVarianceEstimates
.size computeVarianceEstimates, 8
.type computeVarianceEstimates_naive,@object # @computeVarianceEstimates_naive
.globl computeVarianceEstimates_naive
.p2align 3, 0x0
computeVarianceEstimates_naive:
.quad __device_stub__computeVarianceEstimates_naive
.size computeVarianceEstimates_naive, 8
.type computeVarianceZeroMean,@object # @computeVarianceZeroMean
.globl computeVarianceZeroMean
.p2align 3, 0x0
computeVarianceZeroMean:
.quad __device_stub__computeVarianceZeroMean
.size computeVarianceZeroMean, 8
.type sumFloats,@object # @sumFloats
.globl sumFloats
.p2align 3, 0x0
sumFloats:
.quad __device_stub__sumFloats
.size sumFloats, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "computeSquaredMagnitudes"
.size .L__unnamed_1, 25
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "scaleWithVariances"
.size .L__unnamed_2, 19
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "toComplex"
.size .L__unnamed_3, 10
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "toReal"
.size .L__unnamed_4, 7
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "normalizeToReal"
.size .L__unnamed_5, 16
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "normalize"
.size .L__unnamed_6, 10
.type .L__unnamed_7,@object # @6
.L__unnamed_7:
.asciz "computeVarianceEstimates"
.size .L__unnamed_7, 25
.type .L__unnamed_8,@object # @7
.L__unnamed_8:
.asciz "computeVarianceEstimates_naive"
.size .L__unnamed_8, 31
.type .L__unnamed_9,@object # @8
.L__unnamed_9:
.asciz "computeVarianceZeroMean"
.size .L__unnamed_9, 24
.type .L__unnamed_10,@object # @9
.L__unnamed_10:
.asciz "sumFloats"
.size .L__unnamed_10, 10
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__computeSquaredMagnitudes
.addrsig_sym __device_stub__scaleWithVariances
.addrsig_sym __device_stub__toComplex
.addrsig_sym __device_stub__toReal
.addrsig_sym __device_stub__normalizeToReal
.addrsig_sym __device_stub__normalize
.addrsig_sym __device_stub__computeVarianceEstimates
.addrsig_sym __device_stub__computeVarianceEstimates_naive
.addrsig_sym __device_stub__computeVarianceZeroMean
.addrsig_sym __device_stub__sumFloats
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym computeSquaredMagnitudes
.addrsig_sym scaleWithVariances
.addrsig_sym toComplex
.addrsig_sym toReal
.addrsig_sym normalizeToReal
.addrsig_sym normalize
.addrsig_sym computeVarianceEstimates
.addrsig_sym computeVarianceEstimates_naive
.addrsig_sym computeVarianceZeroMean
.addrsig_sym sumFloats
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
// Size of array
#define N 1048576
// Kernel
__global__ void add_vectors(int *a, int *b, int *c)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if(id < N) c[id] = a[id] + b[id];
}
// Main program
int main()
{
// Number of bytes to allocate for N integers
size_t bytes = N*sizeof(int);
// Allocate memory for arrays A, B, and C on host
int *A = (int*)malloc(bytes);
int *B = (int*)malloc(bytes);
int *C = (int*)malloc(bytes);
// Allocate memory for arrays d_A, d_B, and d_C on device
int *d_A, *d_B, *d_C;
cudaMalloc(&d_A, bytes);
cudaMalloc(&d_B, bytes);
cudaMalloc(&d_C, bytes);
// Fill host arrays A and B
for(int i=0; i<N; i++)
{
A[i] = 1;
B[i] = 2;
}
// Copy data from host arrays A and B to device arrays d_A and d_B
cudaMemcpy(d_A, A, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, bytes, cudaMemcpyHostToDevice);
// Set execution configuration parameters
// thr_per_blk: number of CUDA threads per grid block
// blk_in_grid: number of blocks in grid
int thr_per_blk = 256;
int blk_in_grid = ceil( float(N) / thr_per_blk );
// Launch kernel
add_vectors<<< blk_in_grid, thr_per_blk >>>(d_A, d_B, d_C);
// Copy data from device array d_C to host array C
cudaMemcpy(C, d_C, bytes, cudaMemcpyDeviceToHost);
// Verify results
for(int i=0; i<N; i++)
{
if(C[i] != 3)
{
printf("\nError: value of C[%d] = %d instead of 3\n\n", i, C[i]);
exit(-1);
}
}
// Free CPU memory
free(A);
free(B);
free(C);
// Free GPU memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
printf("\n---------------------------\n");
printf("__SUCCESS__\n");
printf("---------------------------\n");
printf("N = %d\n", N);
printf("Threads Per Block = %d\n", thr_per_blk);
printf("Blocks In Grid = %d\n", blk_in_grid);
printf("---------------------------\n\n");
return 0;
} | code for sm_80
Function : _Z11add_vectorsPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GT.AND P0, PT, R6, 0xfffff, PT ; /* 0x000fffff0600780c */
/* 0x000fda0003f04270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
// Size of array
#define N 1048576
// Kernel
__global__ void add_vectors(int *a, int *b, int *c)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if(id < N) c[id] = a[id] + b[id];
}
// Main program
int main()
{
// Number of bytes to allocate for N integers
size_t bytes = N*sizeof(int);
// Allocate memory for arrays A, B, and C on host
int *A = (int*)malloc(bytes);
int *B = (int*)malloc(bytes);
int *C = (int*)malloc(bytes);
// Allocate memory for arrays d_A, d_B, and d_C on device
int *d_A, *d_B, *d_C;
cudaMalloc(&d_A, bytes);
cudaMalloc(&d_B, bytes);
cudaMalloc(&d_C, bytes);
// Fill host arrays A and B
for(int i=0; i<N; i++)
{
A[i] = 1;
B[i] = 2;
}
// Copy data from host arrays A and B to device arrays d_A and d_B
cudaMemcpy(d_A, A, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, bytes, cudaMemcpyHostToDevice);
// Set execution configuration parameters
// thr_per_blk: number of CUDA threads per grid block
// blk_in_grid: number of blocks in grid
int thr_per_blk = 256;
int blk_in_grid = ceil( float(N) / thr_per_blk );
// Launch kernel
add_vectors<<< blk_in_grid, thr_per_blk >>>(d_A, d_B, d_C);
// Copy data from device array d_C to host array C
cudaMemcpy(C, d_C, bytes, cudaMemcpyDeviceToHost);
// Verify results
for(int i=0; i<N; i++)
{
if(C[i] != 3)
{
printf("\nError: value of C[%d] = %d instead of 3\n\n", i, C[i]);
exit(-1);
}
}
// Free CPU memory
free(A);
free(B);
free(C);
// Free GPU memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
printf("\n---------------------------\n");
printf("__SUCCESS__\n");
printf("---------------------------\n");
printf("N = %d\n", N);
printf("Threads Per Block = %d\n", thr_per_blk);
printf("Blocks In Grid = %d\n", blk_in_grid);
printf("---------------------------\n\n");
return 0;
} | .file "tmpxft_001107ef_00000000-6_vector_addition.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z11add_vectorsPiS_S_PiS_S_
.type _Z35__device_stub__Z11add_vectorsPiS_S_PiS_S_, @function
_Z35__device_stub__Z11add_vectorsPiS_S_PiS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11add_vectorsPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z35__device_stub__Z11add_vectorsPiS_S_PiS_S_, .-_Z35__device_stub__Z11add_vectorsPiS_S_PiS_S_
.globl _Z11add_vectorsPiS_S_
.type _Z11add_vectorsPiS_S_, @function
_Z11add_vectorsPiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z11add_vectorsPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z11add_vectorsPiS_S_, .-_Z11add_vectorsPiS_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "\nError: value of C[%d] = %d instead of 3\n\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "\n---------------------------\n"
.LC2:
.string "__SUCCESS__\n"
.LC3:
.string "---------------------------\n"
.LC4:
.string "N = %d\n"
.LC5:
.string "Threads Per Block = %d\n"
.LC6:
.string "Blocks In Grid = %d\n"
.LC7:
.string "---------------------------\n\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $64, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $4194304, %edi
call malloc@PLT
movq %rax, %rbp
movl $4194304, %edi
call malloc@PLT
movq %rax, %rbx
movl $4194304, %edi
call malloc@PLT
movq %rax, %r12
leaq 8(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
movl $0, %eax
.L12:
movl $1, 0(%rbp,%rax)
movl $2, (%rbx,%rax)
addq $4, %rax
cmpq $4194304, %rax
jne .L12
movl $1, %ecx
movl $4194304, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4194304, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 44(%rsp)
movl $1, 48(%rsp)
movl $4096, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L20
.L13:
movl $2, %ecx
movl $4194304, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl $0, %eax
.L15:
movl (%r12,%rax,4), %ecx
cmpl $3, %ecx
jne .L21
addq $1, %rax
cmpq $1048576, %rax
jne .L15
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1048576, %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $256, %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $4096, %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L22
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z35__device_stub__Z11add_vectorsPiS_S_PiS_S_
jmp .L13
.L21:
movl %eax, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $-1, %edi
call exit@PLT
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC8:
.string "_Z11add_vectorsPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z11add_vectorsPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
// Size of array
#define N 1048576
// Kernel
__global__ void add_vectors(int *a, int *b, int *c)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if(id < N) c[id] = a[id] + b[id];
}
// Main program
int main()
{
// Number of bytes to allocate for N integers
size_t bytes = N*sizeof(int);
// Allocate memory for arrays A, B, and C on host
int *A = (int*)malloc(bytes);
int *B = (int*)malloc(bytes);
int *C = (int*)malloc(bytes);
// Allocate memory for arrays d_A, d_B, and d_C on device
int *d_A, *d_B, *d_C;
cudaMalloc(&d_A, bytes);
cudaMalloc(&d_B, bytes);
cudaMalloc(&d_C, bytes);
// Fill host arrays A and B
for(int i=0; i<N; i++)
{
A[i] = 1;
B[i] = 2;
}
// Copy data from host arrays A and B to device arrays d_A and d_B
cudaMemcpy(d_A, A, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, bytes, cudaMemcpyHostToDevice);
// Set execution configuration parameters
// thr_per_blk: number of CUDA threads per grid block
// blk_in_grid: number of blocks in grid
int thr_per_blk = 256;
int blk_in_grid = ceil( float(N) / thr_per_blk );
// Launch kernel
add_vectors<<< blk_in_grid, thr_per_blk >>>(d_A, d_B, d_C);
// Copy data from device array d_C to host array C
cudaMemcpy(C, d_C, bytes, cudaMemcpyDeviceToHost);
// Verify results
for(int i=0; i<N; i++)
{
if(C[i] != 3)
{
printf("\nError: value of C[%d] = %d instead of 3\n\n", i, C[i]);
exit(-1);
}
}
// Free CPU memory
free(A);
free(B);
free(C);
// Free GPU memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
printf("\n---------------------------\n");
printf("__SUCCESS__\n");
printf("---------------------------\n");
printf("N = %d\n", N);
printf("Threads Per Block = %d\n", thr_per_blk);
printf("Blocks In Grid = %d\n", blk_in_grid);
printf("---------------------------\n\n");
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
// Size of array
#define N 1048576
// Kernel
__global__ void add_vectors(int *a, int *b, int *c)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if(id < N) c[id] = a[id] + b[id];
}
// Main program
int main()
{
// Number of bytes to allocate for N integers
size_t bytes = N*sizeof(int);
// Allocate memory for arrays A, B, and C on host
int *A = (int*)malloc(bytes);
int *B = (int*)malloc(bytes);
int *C = (int*)malloc(bytes);
// Allocate memory for arrays d_A, d_B, and d_C on device
int *d_A, *d_B, *d_C;
hipMalloc(&d_A, bytes);
hipMalloc(&d_B, bytes);
hipMalloc(&d_C, bytes);
// Fill host arrays A and B
for(int i=0; i<N; i++)
{
A[i] = 1;
B[i] = 2;
}
// Copy data from host arrays A and B to device arrays d_A and d_B
hipMemcpy(d_A, A, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, B, bytes, hipMemcpyHostToDevice);
// Set execution configuration parameters
// thr_per_blk: number of CUDA threads per grid block
// blk_in_grid: number of blocks in grid
int thr_per_blk = 256;
int blk_in_grid = ceil( float(N) / thr_per_blk );
// Launch kernel
add_vectors<<< blk_in_grid, thr_per_blk >>>(d_A, d_B, d_C);
// Copy data from device array d_C to host array C
hipMemcpy(C, d_C, bytes, hipMemcpyDeviceToHost);
// Verify results
for(int i=0; i<N; i++)
{
if(C[i] != 3)
{
printf("\nError: value of C[%d] = %d instead of 3\n\n", i, C[i]);
exit(-1);
}
}
// Free CPU memory
free(A);
free(B);
free(C);
// Free GPU memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
printf("\n---------------------------\n");
printf("__SUCCESS__\n");
printf("---------------------------\n");
printf("N = %d\n", N);
printf("Threads Per Block = %d\n", thr_per_blk);
printf("Blocks In Grid = %d\n", blk_in_grid);
printf("---------------------------\n\n");
return 0;
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.