system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
int main(int argc, char **argv)
{
printf("Hello World from CPU!\n");
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc, char **argv)
{
printf("Hello World from CPU!\n");
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc, char **argv)
{
printf("Hello World from CPU!\n");
return 0;
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc, char **argv)
{
printf("Hello World from CPU!\n");
return 0;
} | .text
.file "helloworld.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movl $.Lstr, %edi
callq puts@PLT
xorl %eax, %eax
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hello World from CPU!"
.size .Lstr, 22
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000d5242_00000000-6_helloworld.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Hello World from CPU!\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "helloworld.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
movl $.Lstr, %edi
callq puts@PLT
xorl %eax, %eax
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Hello World from CPU!"
.size .Lstr, 22
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // (c) Copyright 2013 Lev Barash, Landau Institute for Theoretical Physics, Russian Academy of Sciences
// This is supplement to the paper:
// L.Yu. Barash, L.N. Shchur, "PRAND: GPU accelerated parallel random number generation library: Using most reliable algorithms and applying parallelism of modern GPUs and CPUs".
// e-mail: barash @ itp.ac.ru (remove space)
#include<stdio.h>
#define gm31_CUDA_CALL(x) do { if((x) != cudaSuccess) { printf("Error: %s at %s:%d\n",cudaGetErrorString(cudaGetLastError()),__FILE__,__LINE__); exit(1);}} while(0)
#define gm31_BLOCKS 512
#define gm31_THREADS 128
#define gm31_ARRAY_SECTIONS (gm31_BLOCKS*gm31_THREADS/32)
#define gm31_qg 30064771058ULL
#define gm31_g 2147483647
#define gm31_halfg 1073741824
#define gm31_k 11
#define gm31_q 14
typedef struct{
unsigned xN[32],xP[32];
} gm31_state;
typedef struct{
unsigned xN[64] __attribute__ ((aligned(16))),
xP[64] __attribute__ ((aligned(16)));
} gm31_sse_state;
unsigned gm31_Consts[16] __attribute__ ((aligned(16))) =
{4294967222UL,36UL,4294967222UL,36UL,gm31_k,0,gm31_k,0,gm31_q,0,gm31_q,0,gm31_g,0,gm31_g,0};
__host__ unsigned int gm31_sse_generate_(gm31_sse_state* state){
unsigned output1,output2;
asm volatile("\n" \
"movaps 48(%3),%%xmm5\n" \
"\n" \
"movaps (%1),%%xmm0\n" \
"movaps %%xmm0,%%xmm7\n" \
"movaps (%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm0\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm0\n" \
"psubq %%xmm6,%%xmm0\n" \
"movaps %%xmm0,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm0\n" \
"paddq %%xmm6,%%xmm0\n" \
"movaps %%xmm0,(%1)\n" \
"movaps %%xmm7,(%2)\n" \
"\n" \
"movaps 16(%1),%%xmm1\n" \
"movaps %%xmm1,%%xmm7\n" \
"movaps 16(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm1\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm1\n" \
"psubq %%xmm6,%%xmm1\n" \
"movaps %%xmm1,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm1\n" \
"paddq %%xmm6,%%xmm1\n" \
"movaps %%xmm1,16(%1)\n" \
"movaps %%xmm7,16(%2)\n" \
"\n" \
"movaps 32(%1),%%xmm2\n" \
"movaps %%xmm2,%%xmm7\n" \
"movaps 32(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm2\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm2\n" \
"psubq %%xmm6,%%xmm2\n" \
"movaps %%xmm2,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm2\n" \
"paddq %%xmm6,%%xmm2\n" \
"movaps %%xmm2,32(%1)\n" \
"movaps %%xmm7,32(%2)\n" \
"\n" \
"movaps 48(%1),%%xmm3\n" \
"movaps %%xmm3,%%xmm7\n" \
"movaps 48(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm3\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm3\n" \
"psubq %%xmm6,%%xmm3\n" \
"movaps %%xmm3,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm3\n" \
"paddq %%xmm6,%%xmm3\n" \
"movaps %%xmm3,48(%1)\n" \
"movaps %%xmm7,48(%2)\n" \
"\n" \
"shufps $136,%%xmm1,%%xmm0\n" \
"shufps $136,%%xmm3,%%xmm2\n" \
"psrld $30,%%xmm0\n" \
"psrld $30,%%xmm2\n" \
"packssdw %%xmm2,%%xmm0\n" \
"\n" \
"movaps 64(%1),%%xmm1\n" \
"movaps %%xmm1,%%xmm7\n" \
"movaps 64(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm1\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm1\n" \
"psubq %%xmm6,%%xmm1\n" \
"movaps %%xmm1,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm1\n" \
"paddq %%xmm6,%%xmm1\n" \
"movaps %%xmm1,64(%1)\n" \
"movaps %%xmm7,64(%2)\n" \
"\n" \
"movaps 80(%1),%%xmm2\n" \
"movaps %%xmm2,%%xmm7\n" \
"movaps 80(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm2\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm2\n" \
"psubq %%xmm6,%%xmm2\n" \
"movaps %%xmm2,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm2\n" \
"paddq %%xmm6,%%xmm2\n" \
"movaps %%xmm2,80(%1)\n" \
"movaps %%xmm7,80(%2)\n" \
"\n" \
"movaps 96(%1),%%xmm3\n" \
"movaps %%xmm3,%%xmm7\n" \
"movaps 96(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm3\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm3\n" \
"psubq %%xmm6,%%xmm3\n" \
"movaps %%xmm3,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm3\n" \
"paddq %%xmm6,%%xmm3\n" \
"movaps %%xmm3,96(%1)\n" \
"movaps %%xmm7,96(%2)\n" \
"\n" \
"movaps 112(%1),%%xmm4\n" \
"movaps %%xmm4,%%xmm7\n" \
"movaps 112(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm4\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm4\n" \
"psubq %%xmm6,%%xmm4\n" \
"movaps %%xmm4,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm4\n" \
"paddq %%xmm6,%%xmm4\n" \
"movaps %%xmm4,112(%1)\n" \
"movaps %%xmm7,112(%2)\n" \
"\n" \
"shufps $136,%%xmm2,%%xmm1\n" \
"shufps $136,%%xmm4,%%xmm3\n" \
"psrld $30,%%xmm1\n" \
"psrld $30,%%xmm3\n" \
"packssdw %%xmm3,%%xmm1\n" \
"\n" \
"packsswb %%xmm1,%%xmm0\n" \
"psllw $7,%%xmm0\n" \
"pmovmskb %%xmm0,%0\n" \
"":"=r"(output1):"r"(state->xN),"r"(state->xP),"r"(gm31_Consts));
asm volatile("\n" \
"movaps 128(%1),%%xmm0\n" \
"movaps %%xmm0,%%xmm7\n" \
"movaps 128(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm0\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm0\n" \
"psubq %%xmm6,%%xmm0\n" \
"movaps %%xmm0,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm0\n" \
"paddq %%xmm6,%%xmm0\n" \
"movaps %%xmm0,128(%1)\n" \
"movaps %%xmm7,128(%2)\n" \
"\n" \
"movaps 144(%1),%%xmm1\n" \
"movaps %%xmm1,%%xmm7\n" \
"movaps 144(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm1\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm1\n" \
"psubq %%xmm6,%%xmm1\n" \
"movaps %%xmm1,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm1\n" \
"paddq %%xmm6,%%xmm1\n" \
"movaps %%xmm1,144(%1)\n" \
"movaps %%xmm7,144(%2)\n" \
"\n" \
"movaps 160(%1),%%xmm2\n" \
"movaps %%xmm2,%%xmm7\n" \
"movaps 160(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm2\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm2\n" \
"psubq %%xmm6,%%xmm2\n" \
"movaps %%xmm2,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm2\n" \
"paddq %%xmm6,%%xmm2\n" \
"movaps %%xmm2,160(%1)\n" \
"movaps %%xmm7,160(%2)\n" \
"\n" \
"movaps 176(%1),%%xmm3\n" \
"movaps %%xmm3,%%xmm7\n" \
"movaps 176(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm3\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm3\n" \
"psubq %%xmm6,%%xmm3\n" \
"movaps %%xmm3,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm3\n" \
"paddq %%xmm6,%%xmm3\n" \
"movaps %%xmm3,176(%1)\n" \
"movaps %%xmm7,176(%2)\n" \
"\n" \
"shufps $136,%%xmm1,%%xmm0\n" \
"shufps $136,%%xmm3,%%xmm2\n" \
"psrld $30,%%xmm0\n" \
"psrld $30,%%xmm2\n" \
"packssdw %%xmm2,%%xmm0\n" \
"\n" \
"movaps 192(%1),%%xmm1\n" \
"movaps %%xmm1,%%xmm7\n" \
"movaps 192(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm1\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm1\n" \
"psubq %%xmm6,%%xmm1\n" \
"movaps %%xmm1,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm1\n" \
"paddq %%xmm6,%%xmm1\n" \
"movaps %%xmm1,192(%1)\n" \
"movaps %%xmm7,192(%2)\n" \
"\n" \
"movaps 208(%1),%%xmm2\n" \
"movaps %%xmm2,%%xmm7\n" \
"movaps 208(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm2\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm2\n" \
"psubq %%xmm6,%%xmm2\n" \
"movaps %%xmm2,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm2\n" \
"paddq %%xmm6,%%xmm2\n" \
"movaps %%xmm2,208(%1)\n" \
"movaps %%xmm7,208(%2)\n" \
"\n" \
"movaps 224(%1),%%xmm3\n" \
"movaps %%xmm3,%%xmm7\n" \
"movaps 224(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm3\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm3\n" \
"psubq %%xmm6,%%xmm3\n" \
"movaps %%xmm3,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm3\n" \
"paddq %%xmm6,%%xmm3\n" \
"movaps %%xmm3,224(%1)\n" \
"movaps %%xmm7,224(%2)\n" \
"\n" \
"movaps 240(%1),%%xmm4\n" \
"movaps %%xmm4,%%xmm7\n" \
"movaps 240(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm4\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm4\n" \
"psubq %%xmm6,%%xmm4\n" \
"movaps %%xmm4,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm4\n" \
"paddq %%xmm6,%%xmm4\n" \
"movaps %%xmm4,240(%1)\n" \
"movaps %%xmm7,240(%2)\n" \
"\n" \
"shufps $136,%%xmm2,%%xmm1\n" \
"shufps $136,%%xmm4,%%xmm3\n" \
"psrld $30,%%xmm1\n" \
"psrld $30,%%xmm3\n" \
"packssdw %%xmm3,%%xmm1\n" \
"\n" \
"packsswb %%xmm1,%%xmm0\n" \
"psllw $7,%%xmm0\n" \
"pmovmskb %%xmm0,%0\n" \
"shll $16,%0\n" \
"":"=r"(output2):"r"(state->xN),"r"(state->xP),"r"(gm31_Consts));
asm volatile("\n" \
"addl %1,%0\n" \
"\n" \
"":"=r"(output2):"r"(output1),"0"(output2));
return output2;
}
__device__ __host__ void gm31_get_sse_state_(gm31_state* state,gm31_sse_state* sse_state){
int i;
for(i=0;i<32;i++) {
sse_state->xN[2*i]=state->xN[i]; sse_state->xP[2*i]=state->xP[i];
sse_state->xN[2*i+1]=sse_state->xP[2*i+1]=0;
}
}
__device__ __host__ unsigned gm31_mod_g(unsigned long long x){ // returns x (mod g)
unsigned long long F,G; G=x;
do{ F=(G>>31); G = (G-(F<<31)+F); } while(G>gm31_g);
return G;
}
__device__ __host__ unsigned gm31_CNext(unsigned N,unsigned P){
unsigned long long curr1,curr2,curr3;
curr1=(unsigned long long)gm31_k*(unsigned long long)N;
curr2=(unsigned long long)gm31_q*(unsigned long long)P;
curr3=gm31_mod_g(gm31_qg+curr1-curr2); return curr3;
}
__device__ __host__ unsigned gm31_CNext2(unsigned N,unsigned P,unsigned myk,unsigned myq){
unsigned long long curr1,curr2,curr3;
curr1=(unsigned long long)myk*(unsigned long long)N;
curr2=(unsigned long long)myq*(unsigned long long)P;
curr3=gm31_mod_g((unsigned long long)myq*(unsigned long long)gm31_g+curr1-curr2);
return curr3;
}
__device__ __host__ unsigned gm31_GetNextN(unsigned x0,unsigned x1,unsigned n){ // returns x_{2^n}
unsigned myk=gm31_k,myq=gm31_q,i,x=x1;
for(i=0;i<n;i++){
x=gm31_CNext2(x,x0,myk,myq);
myk=gm31_CNext2(myk,2,myk,myq);
myq=gm31_CNext2(myq,0,myq,0);
}
return x;
}
__device__ __host__ unsigned gm31_GetNextAny(unsigned x0,unsigned x1,unsigned long long N){ // returns x_N
unsigned long long i; unsigned xp=x0,xn=x1,xpnew,xnnew,shift=0;
i=N; while(i>0){
if(i%2==1){ // xp,xn ----> 2^shift
xpnew=gm31_GetNextN(xp,xn,shift);
xnnew=gm31_GetNextN(xn,gm31_CNext(xn,xp),shift);
xp=xpnew; xn=xnnew;
}
i/=2; shift++;
}
return xp;
}
__device__ __host__ void gm31_skipahead_(gm31_state* state, unsigned long long offset){
unsigned xn,xp,j;
for(j=0;j<32;j++){
xp=gm31_GetNextAny(state->xP[j],state->xN[j],offset);
xn=gm31_GetNextAny(state->xP[j],state->xN[j],offset+1);
state->xP[j]=xp; state->xN[j]=xn;
}
}
__device__ __host__ void gm31_init_(gm31_state* state){
unsigned x0=554937932UL,x1=1253942293UL,xp,xn,j;
for(j=0;j<32;j++){
xp=gm31_GetNextAny(x0,x1,144115183781032008ULL);
xn=gm31_GetNextAny(x0,x1,144115183781032009ULL);
state->xP[j]=xp; state->xN[j]=xn; x0=xp; x1=xn;
}
}
__device__ __host__ void gm31_init_short_sequence_(gm31_state* state,unsigned SequenceNumber){
gm31_init_(state); // 0 <= SequenceNumber < 10^9; length of each sequence <= 8*10^7
gm31_skipahead_(state,82927047ULL*(unsigned long long)SequenceNumber);
}
__device__ __host__ void gm31_init_medium_sequence_(gm31_state* state,unsigned SequenceNumber){
gm31_init_(state); // 0 <= SequenceNumber < 10^7; length of each sequence <= 8*10^9
gm31_skipahead_(state,8799201913ULL*(unsigned long long)SequenceNumber);
}
__device__ __host__ void gm31_init_long_sequence_(gm31_state* state,unsigned SequenceNumber){
gm31_init_(state); // 0 <= SequenceNumber < 10^5; length of each sequence <= 8*10^11
gm31_skipahead_(state,828317697521ULL*(unsigned long long)SequenceNumber);
}
__device__ __host__ unsigned int gm31_generate_(gm31_state* state){
int i; unsigned temp,sum=0,bit=1;
for(i=0;i<32;i++){
temp=gm31_CNext(state->xN[i],state->xP[i]);
state->xP[i]=state->xN[i]; state->xN[i]=temp;
sum += ((temp<gm31_halfg)?0:bit); bit*=2;
}
return sum;
}
__device__ __host__ float gm31_generate_uniform_float_(gm31_state* state){
int i; unsigned temp,sum=0,bit=1;
for(i=0;i<32;i++){
temp=gm31_CNext(state->xN[i],state->xP[i]);
state->xP[i]=state->xN[i]; state->xN[i]=temp;
sum += ((temp<gm31_halfg)?0:bit); bit*=2;
}
return ((float) sum) * 2.3283064365386963e-10;
}
__host__ void gm31_print_state_(gm31_state* state){int i;
printf("Generator State:\nxN={");
for(i=0;i<32;i++) {printf("%u",state->xN[i]%gm31_g); printf((i<31)?",":"}\nxP={");}
for(i=0;i<32;i++) {printf("%u",state->xP[i]%gm31_g); printf((i<31)?",":"}\n\n");}
}
__host__ void gm31_print_sse_state_(gm31_sse_state* state){int i;
printf("Generator State:\nxN={");
for(i=0;i<32;i++) {printf("%u",state->xN[2*i]%gm31_g); printf((i<31)?",":"}\nxP={");}
for(i=0;i<32;i++) {printf("%u",state->xP[2*i]%gm31_g); printf((i<31)?",":"}\n\n");}
}
__global__ void gm31_kernel_generate_array(gm31_state* state, unsigned int* out, long* length) {
unsigned temp,i,orbit,seqNum; long offset;
__shared__ unsigned xP[gm31_THREADS]; // one generator per s=32 threads, i.e. one orbit
__shared__ unsigned xN[gm31_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gm31_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 32;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gm31_GetNextAny(state->xP[orbit],state->xN[orbit],offset);
xN[threadIdx.x]=gm31_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1);
for(i=0;i<(*length);i++){ // each s=32 threads result in "length" values in the output array
temp = gm31_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = (temp < gm31_halfg ? 0 : (1<<orbit) );
__syncthreads();
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12];
__syncthreads();
if(orbit==0){ out[offset+i]=a[threadIdx.x]+a[threadIdx.x+16]; }
}
}
__host__ void gm31_generate_gpu_array_(gm31_state* state, unsigned int* dev_out, long length){
long mylength = length/gm31_ARRAY_SECTIONS;
gm31_state* dev_state;
long* dev_length;
if((mylength*gm31_ARRAY_SECTIONS)<length) mylength++;
gm31_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gm31_state)));
gm31_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gm31_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gm31_state),cudaMemcpyHostToDevice));
gm31_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gm31_kernel_generate_array<<<gm31_BLOCKS,gm31_THREADS>>>(dev_state,dev_out,dev_length);
gm31_CUDA_CALL(cudaGetLastError());
gm31_CUDA_CALL(cudaFree(dev_state)); gm31_CUDA_CALL(cudaFree(dev_length));
}
__global__ void gm31_kernel_generate_array_float(gm31_state* state, float* out, long* length) {
unsigned temp,sum,i,orbit,seqNum; long offset;
__shared__ unsigned xP[gm31_THREADS]; // one generator per s=32 threads, i.e. one orbit
__shared__ unsigned xN[gm31_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gm31_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 32;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gm31_GetNextAny(state->xP[orbit],state->xN[orbit],offset);
xN[threadIdx.x]=gm31_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1);
for(i=0;i<(*length);i++){ // each s=32 threads result in "length" values in the output array
temp = gm31_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = (temp < gm31_halfg ? 0 : (1<<orbit) );
__syncthreads();
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12];
__syncthreads();
if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+16]; out[offset+i]=((float)sum) * 2.3283064365386963e-10; }
}
}
__host__ void gm31_generate_gpu_array_float_(gm31_state* state, float* dev_out, long length){
long mylength = length/gm31_ARRAY_SECTIONS;
gm31_state* dev_state;
long* dev_length;
if((mylength*gm31_ARRAY_SECTIONS)<length) mylength++;
gm31_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gm31_state)));
gm31_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gm31_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gm31_state),cudaMemcpyHostToDevice));
gm31_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gm31_kernel_generate_array_float<<<gm31_BLOCKS,gm31_THREADS>>>(dev_state,dev_out,dev_length);
gm31_CUDA_CALL(cudaGetLastError());
gm31_CUDA_CALL(cudaFree(dev_state)); gm31_CUDA_CALL(cudaFree(dev_length));
}
__global__ void gm31_kernel_generate_array_double(gm31_state* state, double* out, long* length) {
unsigned temp,sum,i,orbit,seqNum; long offset;
__shared__ unsigned xP[gm31_THREADS]; // one generator per s=32 threads, i.e. one orbit
__shared__ unsigned xN[gm31_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gm31_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 32;
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gm31_GetNextAny(state->xP[orbit],state->xN[orbit],offset);
xN[threadIdx.x]=gm31_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1);
for(i=0;i<(*length);i++){ // each s=32 threads result in "length" values in the output array
temp = gm31_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = (temp < gm31_halfg ? 0 : (1<<orbit) );
__syncthreads();
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12];
__syncthreads();
if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+16]; out[offset+i]=((double)sum) * 2.3283064365386963e-10; }
}
}
__host__ void gm31_generate_gpu_array_double_(gm31_state* state, double* dev_out, long length){
long mylength = length/gm31_ARRAY_SECTIONS;
gm31_state* dev_state;
long* dev_length;
if((mylength*gm31_ARRAY_SECTIONS)<length) mylength++;
gm31_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gm31_state)));
gm31_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gm31_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gm31_state),cudaMemcpyHostToDevice));
gm31_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gm31_kernel_generate_array_double<<<gm31_BLOCKS,gm31_THREADS>>>(dev_state,dev_out,dev_length);
gm31_CUDA_CALL(cudaGetLastError());
gm31_CUDA_CALL(cudaFree(dev_state)); gm31_CUDA_CALL(cudaFree(dev_length));
}
__host__ void gm31_generate_array_(gm31_state* state, unsigned int* out, long length){
long mylength = length/gm31_ARRAY_SECTIONS;
gm31_state* dev_state;
unsigned int* dev_out;
long* dev_length;
if((mylength*gm31_ARRAY_SECTIONS)<length) mylength++;
gm31_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gm31_state)));
gm31_CUDA_CALL(cudaMalloc((void**)&dev_out,mylength*gm31_ARRAY_SECTIONS*sizeof(unsigned int)));
gm31_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gm31_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gm31_state),cudaMemcpyHostToDevice));
gm31_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gm31_kernel_generate_array<<<gm31_BLOCKS,gm31_THREADS>>>(dev_state,dev_out,dev_length);
gm31_CUDA_CALL(cudaGetLastError());
gm31_CUDA_CALL(cudaMemcpy(out,dev_out,length*sizeof(unsigned int),cudaMemcpyDeviceToHost));
gm31_CUDA_CALL(cudaFree(dev_state)); gm31_CUDA_CALL(cudaFree(dev_out));
gm31_CUDA_CALL(cudaFree(dev_length));
} | // (c) Copyright 2013 Lev Barash, Landau Institute for Theoretical Physics, Russian Academy of Sciences
// This is supplement to the paper:
// L.Yu. Barash, L.N. Shchur, "PRAND: GPU accelerated parallel random number generation library: Using most reliable algorithms and applying parallelism of modern GPUs and CPUs".
// e-mail: barash @ itp.ac.ru (remove space)
#include <hip/hip_runtime.h>
#include<stdio.h>
#define gm31_CUDA_CALL(x) do { if((x) != hipSuccess) { printf("Error: %s at %s:%d\n",hipGetErrorString(hipGetLastError()),__FILE__,__LINE__); exit(1);}} while(0)
#define gm31_BLOCKS 512
#define gm31_THREADS 128
#define gm31_ARRAY_SECTIONS (gm31_BLOCKS*gm31_THREADS/32)
#define gm31_qg 30064771058ULL
#define gm31_g 2147483647
#define gm31_halfg 1073741824
#define gm31_k 11
#define gm31_q 14
typedef struct{
unsigned xN[32],xP[32];
} gm31_state;
typedef struct{
unsigned xN[64] __attribute__ ((aligned(16))),
xP[64] __attribute__ ((aligned(16)));
} gm31_sse_state;
unsigned gm31_Consts[16] __attribute__ ((aligned(16))) =
{4294967222UL,36UL,4294967222UL,36UL,gm31_k,0,gm31_k,0,gm31_q,0,gm31_q,0,gm31_g,0,gm31_g,0};
__host__ unsigned int gm31_sse_generate_(gm31_sse_state* state){
unsigned output1,output2;
asm volatile("\n" \
"movaps 48(%3),%%xmm5\n" \
"\n" \
"movaps (%1),%%xmm0\n" \
"movaps %%xmm0,%%xmm7\n" \
"movaps (%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm0\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm0\n" \
"psubq %%xmm6,%%xmm0\n" \
"movaps %%xmm0,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm0\n" \
"paddq %%xmm6,%%xmm0\n" \
"movaps %%xmm0,(%1)\n" \
"movaps %%xmm7,(%2)\n" \
"\n" \
"movaps 16(%1),%%xmm1\n" \
"movaps %%xmm1,%%xmm7\n" \
"movaps 16(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm1\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm1\n" \
"psubq %%xmm6,%%xmm1\n" \
"movaps %%xmm1,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm1\n" \
"paddq %%xmm6,%%xmm1\n" \
"movaps %%xmm1,16(%1)\n" \
"movaps %%xmm7,16(%2)\n" \
"\n" \
"movaps 32(%1),%%xmm2\n" \
"movaps %%xmm2,%%xmm7\n" \
"movaps 32(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm2\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm2\n" \
"psubq %%xmm6,%%xmm2\n" \
"movaps %%xmm2,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm2\n" \
"paddq %%xmm6,%%xmm2\n" \
"movaps %%xmm2,32(%1)\n" \
"movaps %%xmm7,32(%2)\n" \
"\n" \
"movaps 48(%1),%%xmm3\n" \
"movaps %%xmm3,%%xmm7\n" \
"movaps 48(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm3\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm3\n" \
"psubq %%xmm6,%%xmm3\n" \
"movaps %%xmm3,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm3\n" \
"paddq %%xmm6,%%xmm3\n" \
"movaps %%xmm3,48(%1)\n" \
"movaps %%xmm7,48(%2)\n" \
"\n" \
"shufps $136,%%xmm1,%%xmm0\n" \
"shufps $136,%%xmm3,%%xmm2\n" \
"psrld $30,%%xmm0\n" \
"psrld $30,%%xmm2\n" \
"packssdw %%xmm2,%%xmm0\n" \
"\n" \
"movaps 64(%1),%%xmm1\n" \
"movaps %%xmm1,%%xmm7\n" \
"movaps 64(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm1\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm1\n" \
"psubq %%xmm6,%%xmm1\n" \
"movaps %%xmm1,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm1\n" \
"paddq %%xmm6,%%xmm1\n" \
"movaps %%xmm1,64(%1)\n" \
"movaps %%xmm7,64(%2)\n" \
"\n" \
"movaps 80(%1),%%xmm2\n" \
"movaps %%xmm2,%%xmm7\n" \
"movaps 80(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm2\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm2\n" \
"psubq %%xmm6,%%xmm2\n" \
"movaps %%xmm2,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm2\n" \
"paddq %%xmm6,%%xmm2\n" \
"movaps %%xmm2,80(%1)\n" \
"movaps %%xmm7,80(%2)\n" \
"\n" \
"movaps 96(%1),%%xmm3\n" \
"movaps %%xmm3,%%xmm7\n" \
"movaps 96(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm3\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm3\n" \
"psubq %%xmm6,%%xmm3\n" \
"movaps %%xmm3,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm3\n" \
"paddq %%xmm6,%%xmm3\n" \
"movaps %%xmm3,96(%1)\n" \
"movaps %%xmm7,96(%2)\n" \
"\n" \
"movaps 112(%1),%%xmm4\n" \
"movaps %%xmm4,%%xmm7\n" \
"movaps 112(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm4\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm4\n" \
"psubq %%xmm6,%%xmm4\n" \
"movaps %%xmm4,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm4\n" \
"paddq %%xmm6,%%xmm4\n" \
"movaps %%xmm4,112(%1)\n" \
"movaps %%xmm7,112(%2)\n" \
"\n" \
"shufps $136,%%xmm2,%%xmm1\n" \
"shufps $136,%%xmm4,%%xmm3\n" \
"psrld $30,%%xmm1\n" \
"psrld $30,%%xmm3\n" \
"packssdw %%xmm3,%%xmm1\n" \
"\n" \
"packsswb %%xmm1,%%xmm0\n" \
"psllw $7,%%xmm0\n" \
"pmovmskb %%xmm0,%0\n" \
"":"=r"(output1):"r"(state->xN),"r"(state->xP),"r"(gm31_Consts));
asm volatile("\n" \
"movaps 128(%1),%%xmm0\n" \
"movaps %%xmm0,%%xmm7\n" \
"movaps 128(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm0\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm0\n" \
"psubq %%xmm6,%%xmm0\n" \
"movaps %%xmm0,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm0\n" \
"paddq %%xmm6,%%xmm0\n" \
"movaps %%xmm0,128(%1)\n" \
"movaps %%xmm7,128(%2)\n" \
"\n" \
"movaps 144(%1),%%xmm1\n" \
"movaps %%xmm1,%%xmm7\n" \
"movaps 144(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm1\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm1\n" \
"psubq %%xmm6,%%xmm1\n" \
"movaps %%xmm1,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm1\n" \
"paddq %%xmm6,%%xmm1\n" \
"movaps %%xmm1,144(%1)\n" \
"movaps %%xmm7,144(%2)\n" \
"\n" \
"movaps 160(%1),%%xmm2\n" \
"movaps %%xmm2,%%xmm7\n" \
"movaps 160(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm2\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm2\n" \
"psubq %%xmm6,%%xmm2\n" \
"movaps %%xmm2,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm2\n" \
"paddq %%xmm6,%%xmm2\n" \
"movaps %%xmm2,160(%1)\n" \
"movaps %%xmm7,160(%2)\n" \
"\n" \
"movaps 176(%1),%%xmm3\n" \
"movaps %%xmm3,%%xmm7\n" \
"movaps 176(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm3\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm3\n" \
"psubq %%xmm6,%%xmm3\n" \
"movaps %%xmm3,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm3\n" \
"paddq %%xmm6,%%xmm3\n" \
"movaps %%xmm3,176(%1)\n" \
"movaps %%xmm7,176(%2)\n" \
"\n" \
"shufps $136,%%xmm1,%%xmm0\n" \
"shufps $136,%%xmm3,%%xmm2\n" \
"psrld $30,%%xmm0\n" \
"psrld $30,%%xmm2\n" \
"packssdw %%xmm2,%%xmm0\n" \
"\n" \
"movaps 192(%1),%%xmm1\n" \
"movaps %%xmm1,%%xmm7\n" \
"movaps 192(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm1\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm1\n" \
"psubq %%xmm6,%%xmm1\n" \
"movaps %%xmm1,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm1\n" \
"paddq %%xmm6,%%xmm1\n" \
"movaps %%xmm1,192(%1)\n" \
"movaps %%xmm7,192(%2)\n" \
"\n" \
"movaps 208(%1),%%xmm2\n" \
"movaps %%xmm2,%%xmm7\n" \
"movaps 208(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm2\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm2\n" \
"psubq %%xmm6,%%xmm2\n" \
"movaps %%xmm2,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm2\n" \
"paddq %%xmm6,%%xmm2\n" \
"movaps %%xmm2,208(%1)\n" \
"movaps %%xmm7,208(%2)\n" \
"\n" \
"movaps 224(%1),%%xmm3\n" \
"movaps %%xmm3,%%xmm7\n" \
"movaps 224(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm3\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm3\n" \
"psubq %%xmm6,%%xmm3\n" \
"movaps %%xmm3,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm3\n" \
"paddq %%xmm6,%%xmm3\n" \
"movaps %%xmm3,224(%1)\n" \
"movaps %%xmm7,224(%2)\n" \
"\n" \
"movaps 240(%1),%%xmm4\n" \
"movaps %%xmm4,%%xmm7\n" \
"movaps 240(%2),%%xmm6\n" \
"pmuludq 16(%3),%%xmm4\n" \
"pmuludq 32(%3),%%xmm6\n" \
"paddq (%3),%%xmm4\n" \
"psubq %%xmm6,%%xmm4\n" \
"movaps %%xmm4,%%xmm6\n" \
"psrlq $31,%%xmm6\n" \
"andps %%xmm5,%%xmm4\n" \
"paddq %%xmm6,%%xmm4\n" \
"movaps %%xmm4,240(%1)\n" \
"movaps %%xmm7,240(%2)\n" \
"\n" \
"shufps $136,%%xmm2,%%xmm1\n" \
"shufps $136,%%xmm4,%%xmm3\n" \
"psrld $30,%%xmm1\n" \
"psrld $30,%%xmm3\n" \
"packssdw %%xmm3,%%xmm1\n" \
"\n" \
"packsswb %%xmm1,%%xmm0\n" \
"psllw $7,%%xmm0\n" \
"pmovmskb %%xmm0,%0\n" \
"shll $16,%0\n" \
"":"=r"(output2):"r"(state->xN),"r"(state->xP),"r"(gm31_Consts));
asm volatile("\n" \
"addl %1,%0\n" \
"\n" \
"":"=r"(output2):"r"(output1),"0"(output2));
return output2;
}
// Expand a scalar generator state into the interleaved SSE layout:
// each 32-bit lag value occupies the low half of a 64-bit lane and the
// high half is zeroed (slots 2*i / 2*i+1 per lag i).
__device__ __host__ void gm31_get_sse_state_(gm31_state* state,gm31_sse_state* sse_state){
  for(int lag=0;lag<32;lag++){
    sse_state->xN[2*lag]   = state->xN[lag];
    sse_state->xN[2*lag+1] = 0;
    sse_state->xP[2*lag]   = state->xP[lag];
    sse_state->xP[2*lag+1] = 0;
  }
}
// Reduce x modulo g using the Mersenne-style fold 2^31 = 1 (mod g):
// the bits above bit 30 are repeatedly added back into the low 31 bits.
// NOTE(review): the loop exits with a value in [0, gm31_g], i.e. gm31_g
// itself can be returned; callers that print normalize with %gm31_g.
__device__ __host__ unsigned gm31_mod_g(unsigned long long x){ // returns x (mod g)
  unsigned long long r = x;
  do{
    unsigned long long hi = r >> 31;   // overflow part above bit 30
    r = r - (hi << 31) + hi;           // fold it back: r mod 2^31 + hi
  }while(r > gm31_g);
  return r;
}
// One step of the recurrence x_{n+1} = k*x_n - q*x_{n-1} (mod g).
// gm31_qg (presumably q*g — cf. the analogous myq*gm31_g term in
// gm31_CNext2) is added first so the unsigned subtraction cannot wrap.
__device__ __host__ unsigned gm31_CNext(unsigned N,unsigned P){
  unsigned long long kTerm = (unsigned long long)gm31_k*(unsigned long long)N;
  unsigned long long qTerm = (unsigned long long)gm31_q*(unsigned long long)P;
  return gm31_mod_g(gm31_qg + kTerm - qTerm);
}
// One step of x_{n+1} = myk*x_n - myq*x_{n-1} (mod g) with caller-supplied
// coefficients; myq*g is added up front to keep the arithmetic non-negative.
__device__ __host__ unsigned gm31_CNext2(unsigned N,unsigned P,unsigned myk,unsigned myq){
  unsigned long long kTerm = (unsigned long long)myk*(unsigned long long)N;
  unsigned long long qTerm = (unsigned long long)myq*(unsigned long long)P;
  unsigned long long bias  = (unsigned long long)myq*(unsigned long long)gm31_g;
  return gm31_mod_g(bias + kTerm - qTerm);
}
// Advance by 2^n steps: returns x_{2^n} given x_0 and x_1.
// Each pass doubles the stride by updating the recurrence coefficients
// (myk,myq) alongside the state. Statement order matters: the myk
// update must read the not-yet-updated myq.
__device__ __host__ unsigned gm31_GetNextN(unsigned x0,unsigned x1,unsigned n){ // returns x_{2^n}
  unsigned myk=gm31_k,myq=gm31_q,i,x=x1;
  for(i=0;i<n;i++){
    x=gm31_CNext2(x,x0,myk,myq);      // jump the state by the current stride
    myk=gm31_CNext2(myk,2,myk,myq);   // k <- k*k - 2*q (mod g): coefficient for doubled stride
    myq=gm31_CNext2(myq,0,myq,0);     // q <- q*q (mod g)
  }
  return x;
}
// Advance by an arbitrary offset: returns x_N given the adjacent pair
// (x_0, x_1). Walks the binary expansion of N; for each set bit the pair
// (xp,xn) is jumped ahead by 2^shift via gm31_GetNextN. xn's jump starts
// from (xn, x_{n+1}) so the pair remains adjacent after the jump.
__device__ __host__ unsigned gm31_GetNextAny(unsigned x0,unsigned x1,unsigned long long N){ // returns x_N
  unsigned long long i; unsigned xp=x0,xn=x1,xpnew,xnnew,shift=0;
  i=N; while(i>0){
    if(i%2==1){ // xp,xn ----> 2^shift
      xpnew=gm31_GetNextN(xp,xn,shift);
      xnnew=gm31_GetNextN(xn,gm31_CNext(xn,xp),shift);
      xp=xpnew; xn=xnnew;             // commit both halves only after both jumps
    }
    i/=2; shift++;
  }
  return xp;
}
// Skip each of the 32 independent orbits ahead by `offset` steps,
// keeping the (previous, next) pair adjacent.
__device__ __host__ void gm31_skipahead_(gm31_state* state, unsigned long long offset){
  for(unsigned orbit=0;orbit<32;orbit++){
    unsigned newP = gm31_GetNextAny(state->xP[orbit],state->xN[orbit],offset);
    unsigned newN = gm31_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1);
    state->xP[orbit] = newP;
    state->xN[orbit] = newN;
  }
}
// Initialize the 32 orbits from fixed seeds; each successive orbit is
// placed 144115183781032008 steps further along the same sequence.
__device__ __host__ void gm31_init_(gm31_state* state){
  unsigned prev = 554937932UL;
  unsigned next = 1253942293UL;
  for(unsigned j=0;j<32;j++){
    unsigned xp = gm31_GetNextAny(prev,next,144115183781032008ULL);
    unsigned xn = gm31_GetNextAny(prev,next,144115183781032009ULL);
    state->xP[j] = xp;
    state->xN[j] = xn;
    prev = xp; next = xn;             // next orbit starts where this one landed
  }
}
// Position the generator at the start of short sequence #SequenceNumber.
// Valid range: 0 <= SequenceNumber < 10^9; each sequence holds <= 8*10^7 values.
__device__ __host__ void gm31_init_short_sequence_(gm31_state* state,unsigned SequenceNumber){
  gm31_init_(state);
  gm31_skipahead_(state, 82927047ULL * (unsigned long long)SequenceNumber);
}
// Position the generator at the start of medium sequence #SequenceNumber.
// Valid range: 0 <= SequenceNumber < 10^7; each sequence holds <= 8*10^9 values.
__device__ __host__ void gm31_init_medium_sequence_(gm31_state* state,unsigned SequenceNumber){
  gm31_init_(state);
  gm31_skipahead_(state, 8799201913ULL * (unsigned long long)SequenceNumber);
}
// Position the generator at the start of long sequence #SequenceNumber.
// Valid range: 0 <= SequenceNumber < 10^5; each sequence holds <= 8*10^11 values.
__device__ __host__ void gm31_init_long_sequence_(gm31_state* state,unsigned SequenceNumber){
  gm31_init_(state);
  gm31_skipahead_(state, 828317697521ULL * (unsigned long long)SequenceNumber);
}
// Produce one 32-bit output word: advance all 32 orbits one step and set
// bit i when orbit i's new value is >= gm31_halfg.
__device__ __host__ unsigned int gm31_generate_(gm31_state* state){
  unsigned word = 0;
  for(int i=0;i<32;i++){
    unsigned next = gm31_CNext(state->xN[i],state->xP[i]);
    state->xP[i] = state->xN[i];
    state->xN[i] = next;
    if(next >= gm31_halfg) word |= (1u << i);
  }
  return word;
}
// Produce one uniform float in [0,1): build the same 32-bit word as
// gm31_generate_ and scale it by 2^-32.
__device__ __host__ float gm31_generate_uniform_float_(gm31_state* state){
  unsigned word = 0;
  for(int i=0;i<32;i++){
    unsigned next = gm31_CNext(state->xN[i],state->xP[i]);
    state->xP[i] = state->xN[i];
    state->xN[i] = next;
    if(next >= gm31_halfg) word |= (1u << i);
  }
  return ((float) word) * 2.3283064365386963e-10;  // 2^-32
}
// Pretty-print both lag arrays of a scalar state; values are shown
// reduced modulo g.
__host__ void gm31_print_state_(gm31_state* state){
  printf("Generator State:\nxN={");
  for(int i=0;i<32;i++){
    printf("%u",state->xN[i]%gm31_g);
    printf((i<31)?",":"}\nxP={");
  }
  for(int i=0;i<32;i++){
    printf("%u",state->xP[i]%gm31_g);
    printf((i<31)?",":"}\n\n");
  }
}
// Pretty-print an SSE state: only the even (low-lane) slots carry data,
// so index 2*i; values are shown reduced modulo g.
__host__ void gm31_print_sse_state_(gm31_sse_state* state){
  printf("Generator State:\nxN={");
  for(int i=0;i<32;i++){
    printf("%u",state->xN[2*i]%gm31_g);
    printf((i<31)?",":"}\nxP={");
  }
  for(int i=0;i<32;i++){
    printf("%u",state->xP[2*i]%gm31_g);
    printf((i<31)?",":"}\n\n");
  }
}
// One generator per 32 consecutive threads; `orbit` is the lane index
// within the group. Each step the group reduces its 32 per-orbit bits in
// shared array `a` into one 32-bit output word.
__global__ void gm31_kernel_generate_array(gm31_state* state, unsigned int* out, long* length) {
  unsigned temp,i,orbit,seqNum; long offset;
  __shared__ unsigned xP[gm31_THREADS]; // one generator per s=32 threads, i.e. one orbit
  __shared__ unsigned xN[gm31_THREADS]; // per thread, i.e. blockDim.x orbits per block
  __shared__ unsigned a[gm31_THREADS];  // array "a" contains corresponding parts of output
  orbit = threadIdx.x % 32;
  seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index
  offset = seqNum*(*length);            // start of this sequence's section of the output
  xP[threadIdx.x]=gm31_GetNextAny(state->xP[orbit],state->xN[orbit],offset);
  xN[threadIdx.x]=gm31_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1);
  for(i=0;i<(*length);i++){ // each s=32 threads result in "length" values in the output array
    temp = gm31_CNext( xN[threadIdx.x], xP[threadIdx.x] );
    xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
    a[threadIdx.x] = (temp < gm31_halfg ? 0 : (1u<<orbit) ); // 1u: avoid signed-shift UB at orbit==31
    __syncthreads();
    // two-stage tree reduction: 4-wide partial sums, then 16-wide
    if((orbit&3)==0)  a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
    __syncthreads();
    if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12];
    __syncthreads();
    if(orbit==0){ out[offset+i]=a[threadIdx.x]+a[threadIdx.x+16]; }
    // BUGFIX: barrier before the next iteration overwrites a[]; without it,
    // the lane owning slot threadIdx.x+16 can loop around and clobber the
    // value while lane 0 is still reading it.
    __syncthreads();
  }
}
// Fill the caller-provided device buffer dev_out with `length` random
// words. The array is split into gm31_ARRAY_SECTIONS sections of
// `mylength` values each (rounded up so sections cover the array).
__host__ void gm31_generate_gpu_array_(gm31_state* state, unsigned int* dev_out, long length){
  long mylength = length/gm31_ARRAY_SECTIONS;
  if(mylength*gm31_ARRAY_SECTIONS < length) mylength++;   // round sections up
  gm31_state* dev_state;
  long*       dev_length;
  gm31_CUDA_CALL(hipMalloc((void**)&dev_state,sizeof(gm31_state)));
  gm31_CUDA_CALL(hipMalloc((void**)&dev_length,sizeof(long)));
  gm31_CUDA_CALL(hipMemcpy(dev_state,state,sizeof(gm31_state),hipMemcpyHostToDevice));
  gm31_CUDA_CALL(hipMemcpy(dev_length,&mylength,sizeof(long),hipMemcpyHostToDevice));
  gm31_kernel_generate_array<<<gm31_BLOCKS,gm31_THREADS>>>(dev_state,dev_out,dev_length);
  gm31_CUDA_CALL(hipGetLastError());
  gm31_CUDA_CALL(hipFree(dev_state));
  gm31_CUDA_CALL(hipFree(dev_length));
}
// Same cooperative scheme as gm31_kernel_generate_array, but each reduced
// 32-bit word is scaled by 2^-32 into a uniform float in [0,1).
__global__ void gm31_kernel_generate_array_float(gm31_state* state, float* out, long* length) {
  unsigned temp,sum,i,orbit,seqNum; long offset;
  __shared__ unsigned xP[gm31_THREADS]; // one generator per s=32 threads, i.e. one orbit
  __shared__ unsigned xN[gm31_THREADS]; // per thread, i.e. blockDim.x orbits per block
  __shared__ unsigned a[gm31_THREADS];  // array "a" contains corresponding parts of output
  orbit = threadIdx.x % 32;
  seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index
  offset = seqNum*(*length);            // start of this sequence's section of the output
  xP[threadIdx.x]=gm31_GetNextAny(state->xP[orbit],state->xN[orbit],offset);
  xN[threadIdx.x]=gm31_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1);
  for(i=0;i<(*length);i++){ // each s=32 threads result in "length" values in the output array
    temp = gm31_CNext( xN[threadIdx.x], xP[threadIdx.x] );
    xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
    a[threadIdx.x] = (temp < gm31_halfg ? 0 : (1u<<orbit) ); // 1u: avoid signed-shift UB at orbit==31
    __syncthreads();
    // two-stage tree reduction: 4-wide partial sums, then 16-wide
    if((orbit&3)==0)  a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
    __syncthreads();
    if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12];
    __syncthreads();
    if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+16]; out[offset+i]=((float)sum) * 2.3283064365386963e-10; }
    // BUGFIX: barrier before the next iteration overwrites a[]; without it,
    // the lane owning slot threadIdx.x+16 can loop around and clobber the
    // value while lane 0 is still reading it.
    __syncthreads();
  }
}
// Fill the caller-provided device buffer dev_out with `length` uniform
// floats; sectioning mirrors gm31_generate_gpu_array_.
__host__ void gm31_generate_gpu_array_float_(gm31_state* state, float* dev_out, long length){
  long mylength = length/gm31_ARRAY_SECTIONS;
  if(mylength*gm31_ARRAY_SECTIONS < length) mylength++;   // round sections up
  gm31_state* dev_state;
  long*       dev_length;
  gm31_CUDA_CALL(hipMalloc((void**)&dev_state,sizeof(gm31_state)));
  gm31_CUDA_CALL(hipMalloc((void**)&dev_length,sizeof(long)));
  gm31_CUDA_CALL(hipMemcpy(dev_state,state,sizeof(gm31_state),hipMemcpyHostToDevice));
  gm31_CUDA_CALL(hipMemcpy(dev_length,&mylength,sizeof(long),hipMemcpyHostToDevice));
  gm31_kernel_generate_array_float<<<gm31_BLOCKS,gm31_THREADS>>>(dev_state,dev_out,dev_length);
  gm31_CUDA_CALL(hipGetLastError());
  gm31_CUDA_CALL(hipFree(dev_state));
  gm31_CUDA_CALL(hipFree(dev_length));
}
// Same cooperative scheme as gm31_kernel_generate_array, but each reduced
// 32-bit word is scaled by 2^-32 into a uniform double in [0,1).
__global__ void gm31_kernel_generate_array_double(gm31_state* state, double* out, long* length) {
  unsigned temp,sum,i,orbit,seqNum; long offset;
  __shared__ unsigned xP[gm31_THREADS]; // one generator per s=32 threads, i.e. one orbit
  __shared__ unsigned xN[gm31_THREADS]; // per thread, i.e. blockDim.x orbits per block
  __shared__ unsigned a[gm31_THREADS];  // array "a" contains corresponding parts of output
  orbit = threadIdx.x % 32;
  seqNum = (threadIdx.x + blockIdx.x * blockDim.x)>>5; // RNG_sequence index
  offset = seqNum*(*length);            // start of this sequence's section of the output
  xP[threadIdx.x]=gm31_GetNextAny(state->xP[orbit],state->xN[orbit],offset);
  xN[threadIdx.x]=gm31_GetNextAny(state->xP[orbit],state->xN[orbit],offset+1);
  for(i=0;i<(*length);i++){ // each s=32 threads result in "length" values in the output array
    temp = gm31_CNext( xN[threadIdx.x], xP[threadIdx.x] );
    xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
    a[threadIdx.x] = (temp < gm31_halfg ? 0 : (1u<<orbit) ); // 1u: avoid signed-shift UB at orbit==31
    __syncthreads();
    // two-stage tree reduction: 4-wide partial sums, then 16-wide
    if((orbit&3)==0)  a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
    __syncthreads();
    if((orbit&15)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]+a[threadIdx.x+12];
    __syncthreads();
    if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+16]; out[offset+i]=((double)sum) * 2.3283064365386963e-10; }
    // BUGFIX: barrier before the next iteration overwrites a[]; without it,
    // the lane owning slot threadIdx.x+16 can loop around and clobber the
    // value while lane 0 is still reading it.
    __syncthreads();
  }
}
// Fill the caller-provided device buffer dev_out with `length` uniform
// doubles; sectioning mirrors gm31_generate_gpu_array_.
__host__ void gm31_generate_gpu_array_double_(gm31_state* state, double* dev_out, long length){
  long mylength = length/gm31_ARRAY_SECTIONS;
  if(mylength*gm31_ARRAY_SECTIONS < length) mylength++;   // round sections up
  gm31_state* dev_state;
  long*       dev_length;
  gm31_CUDA_CALL(hipMalloc((void**)&dev_state,sizeof(gm31_state)));
  gm31_CUDA_CALL(hipMalloc((void**)&dev_length,sizeof(long)));
  gm31_CUDA_CALL(hipMemcpy(dev_state,state,sizeof(gm31_state),hipMemcpyHostToDevice));
  gm31_CUDA_CALL(hipMemcpy(dev_length,&mylength,sizeof(long),hipMemcpyHostToDevice));
  gm31_kernel_generate_array_double<<<gm31_BLOCKS,gm31_THREADS>>>(dev_state,dev_out,dev_length);
  gm31_CUDA_CALL(hipGetLastError());
  gm31_CUDA_CALL(hipFree(dev_state));
  gm31_CUDA_CALL(hipFree(dev_length));
}
// Generate `length` random words on the GPU and copy them into the host
// array `out`; all device buffers are allocated and released here. The
// device buffer is sized mylength*gm31_ARRAY_SECTIONS (>= length) so
// every section has room, but only `length` words are copied back.
__host__ void gm31_generate_array_(gm31_state* state, unsigned int* out, long length){
  long mylength = length/gm31_ARRAY_SECTIONS;
  if(mylength*gm31_ARRAY_SECTIONS < length) mylength++;   // round sections up
  gm31_state*   dev_state;
  unsigned int* dev_out;
  long*         dev_length;
  gm31_CUDA_CALL(hipMalloc((void**)&dev_state,sizeof(gm31_state)));
  gm31_CUDA_CALL(hipMalloc((void**)&dev_out,mylength*gm31_ARRAY_SECTIONS*sizeof(unsigned int)));
  gm31_CUDA_CALL(hipMalloc((void**)&dev_length,sizeof(long)));
  gm31_CUDA_CALL(hipMemcpy(dev_state,state,sizeof(gm31_state),hipMemcpyHostToDevice));
  gm31_CUDA_CALL(hipMemcpy(dev_length,&mylength,sizeof(long),hipMemcpyHostToDevice));
  gm31_kernel_generate_array<<<gm31_BLOCKS,gm31_THREADS>>>(dev_state,dev_out,dev_length);
  gm31_CUDA_CALL(hipGetLastError());
  gm31_CUDA_CALL(hipMemcpy(out,dev_out,length*sizeof(unsigned int),hipMemcpyDeviceToHost));
  gm31_CUDA_CALL(hipFree(dev_state));
  gm31_CUDA_CALL(hipFree(dev_out));
  gm31_CUDA_CALL(hipFree(dev_length));
}
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <string.h>
#define DATA_LENGTH 100
#define CUDA_CALL(X) X; // {if(cudaError == X){printf("Error Calling %s at line %s\n", #X, __LINE__);}}
float * genInput(int l);
void verify(float *a, float *b, float *c, int l);
/* Divergence micro-benchmark kernel: both sides of every branch perform
   the identical add, so the result never depends on which path a thread
   takes — the branches exist only to exercise warp-divergence patterns.
   All threads write the same out[i] slots (benign: every writer stores
   the same value). `len` is unused; the bound is compile-time DATA_LENGTH. */
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
  int i=0;
  int tid = threadIdx.x;
  for(i=0; i<DATA_LENGTH; i++){
    // first half of first warp and second half of 2nd warp
    if(tid < 16 || tid > 47)
      out[i] = in1[i]+in2[i];
    else
      out[i] = in1[i]+in2[i];
  }
  for(i=0; i<DATA_LENGTH; i++){
    // only even threads not compactable
    if(threadIdx.x%2 == 0 )
      out[i] = in1[i]+in2[i];
    else
      out[i] = in1[i]+in2[i];
  }
  out[0] = in1[0] + in2[0];
}
/* Host driver: build two random vectors, add them on the GPU with one
   64-thread block, copy the result back and check it against a CPU
   reference. CUDA_CALL discards the status — the macro expands to the
   bare call (see its #define above). */
int main(int argc, char **argv) {
  int inputLength;
  float *hostInput1;
  float *hostInput2;
  float *hostOutput;
  float *deviceInput1;
  float *deviceInput2;
  float *deviceOutput;
  struct timeval t;
  gettimeofday(&t, NULL);
  srand(t.tv_sec);                 // seed RNG from wall-clock seconds
  inputLength = DATA_LENGTH;
  hostInput1 = genInput(inputLength);
  hostInput2 = genInput(inputLength);
  hostOutput = ( float * )malloc(inputLength * sizeof(float));
  //@@ Allocate GPU memory here
  CUDA_CALL(cudaMalloc((void**)&deviceInput1, inputLength*sizeof(float)));
  CUDA_CALL(cudaMalloc((void**)&deviceInput2, inputLength*sizeof(float)));
  CUDA_CALL(cudaMalloc((void**)&deviceOutput, inputLength*sizeof(float)));
  //@@ Copy memory to the GPU here
  CUDA_CALL(cudaMemcpy(deviceInput1, hostInput1, sizeof(float)*inputLength, cudaMemcpyHostToDevice));
  CUDA_CALL(cudaMemcpy(deviceInput2, hostInput2, sizeof(float)*inputLength, cudaMemcpyHostToDevice));
  //@@ Initialize the grid and block dimensions here
  dim3 numBlocks(1,1,1);
  //dim3 numThreads(ThreadsPerBlock,1,1);
  dim3 numThreads(64,1,1);         // two warps: enough to show both divergence patterns
  //@@ Launch the GPU Kernel here
  vecAdd<<<numBlocks, numThreads>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
  cudaDeviceSynchronize();
  //@@ Copy the GPU memory back to the CPU here
  CUDA_CALL(cudaMemcpy(hostOutput, deviceOutput, inputLength*sizeof(float), cudaMemcpyDeviceToHost));
  //@@ Free the GPU memory here
  CUDA_CALL(cudaFree(deviceInput1));
  CUDA_CALL(cudaFree(deviceInput2));
  CUDA_CALL(cudaFree(deviceOutput));
  verify(hostInput1, hostInput2, hostOutput, inputLength);
  free(hostInput1);
  free(hostInput2);
  free(hostOutput);
  return 0;
}
/* Allocate and fill an array of l floats with rand()/rand() ratios.
   NOTE(review): malloc's result is unchecked, and a zero from the second
   rand() yields +inf — tolerated in this test harness. */
float * genInput(int l)
{
  int i;
  float * arr = (float*)malloc(l*sizeof(float));
  for(i=0; i<l; i++){
    arr[i] = rand();            // numerator, stored then re-read as float
    arr[i] = arr[i]/rand();
  }
  return arr;
}
void verify(float *a, float *b, float *c, int l)
{
char buff1[50] = {0};
char buff2[50] = {0};
int i;
for(i=0; i<l; i++){
float d = a[i]+b[i];
sprintf(buff1, "%1.8f", d);
sprintf(buff2, "%1.8f", c[i]);
if(strcmp(buff1, buff2) != 0){
printf("ERROR at index %d, Exp %1.8f Got %1.8f\n",i,d,c[i]);
break;
}
}
} | .file "tmpxft_00136a3a_00000000-6_divergent_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2075:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2075:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z8genInputi
.type _Z8genInputi, @function
_Z8genInputi:
.LFB2071:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movl %edi, %ebx
movslq %edi, %r12
salq $2, %r12
movq %r12, %rdi
call malloc@PLT
movq %rax, %r13
testl %ebx, %ebx
jle .L3
movq %rax, %rbx
addq %rax, %r12
.L5:
call rand@PLT
pxor %xmm2, %xmm2
cvtsi2ssl %eax, %xmm2
movd %xmm2, %ebp
movss %xmm2, (%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movd %ebp, %xmm1
divss %xmm0, %xmm1
movss %xmm1, (%rbx)
addq $4, %rbx
cmpq %r12, %rbx
jne .L5
.L3:
movq %r13, %rax
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2071:
.size _Z8genInputi, .-_Z8genInputi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%1.8f"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "ERROR at index %d, Exp %1.8f Got %1.8f\n"
.text
.globl _Z6verifyPfS_S_i
.type _Z6verifyPfS_S_i, @function
_Z6verifyPfS_S_i:
.LFB2072:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $168, %rsp
.cfi_def_cfa_offset 224
movq %rsi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
movq $0, 32(%rsp)
movq $0, 40(%rsp)
movq $0, 48(%rsp)
movq $0, 56(%rsp)
movq $0, 64(%rsp)
movq $0, 72(%rsp)
movw $0, 80(%rsp)
movq $0, 96(%rsp)
movq $0, 104(%rsp)
movq $0, 112(%rsp)
movq $0, 120(%rsp)
movq $0, 128(%rsp)
movq $0, 136(%rsp)
movw $0, 144(%rsp)
testl %ecx, %ecx
jle .L8
movq %rdi, %r15
movq %rdx, %rbp
movslq %ecx, %rax
movq %rax, 24(%rsp)
movl $0, %ebx
leaq .LC0(%rip), %r14
.L11:
movss (%r15,%rbx,4), %xmm0
movq 8(%rsp), %rax
addss (%rax,%rbx,4), %xmm0
pxor %xmm2, %xmm2
cvtss2sd %xmm0, %xmm2
movsd %xmm2, 16(%rsp)
leaq 32(%rsp), %r12
movapd %xmm2, %xmm0
movq %r14, %rcx
movl $50, %edx
movl $2, %esi
movq %r12, %rdi
movl $1, %eax
call __sprintf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 0(%rbp), %xmm0
leaq 96(%rsp), %r13
movq %r14, %rcx
movl $50, %edx
movl $2, %esi
movq %r13, %rdi
movl $1, %eax
call __sprintf_chk@PLT
movq %r13, %rsi
movq %r12, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L15
addq $1, %rbx
addq $4, %rbp
movq 24(%rsp), %rax
cmpq %rax, %rbx
jne .L11
jmp .L8
.L15:
pxor %xmm1, %xmm1
cvtss2sd 0(%rbp), %xmm1
movsd 16(%rsp), %xmm0
movl %ebx, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
.L8:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2072:
.size _Z6verifyPfS_S_i, .-_Z6verifyPfS_S_i
.globl _Z30__device_stub__Z6vecAddPfS_S_iPfS_S_i
.type _Z30__device_stub__Z6vecAddPfS_S_iPfS_S_i, @function
_Z30__device_stub__Z6vecAddPfS_S_iPfS_S_i:
.LFB2097:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L21
.L17:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L22
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6vecAddPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L17
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2097:
.size _Z30__device_stub__Z6vecAddPfS_S_iPfS_S_i, .-_Z30__device_stub__Z6vecAddPfS_S_iPfS_S_i
.globl _Z6vecAddPfS_S_i
.type _Z6vecAddPfS_S_i, @function
_Z6vecAddPfS_S_i:
.LFB2098:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z6vecAddPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2098:
.size _Z6vecAddPfS_S_i, .-_Z6vecAddPfS_S_i
.globl main
.type main, @function
main:
.LFB2070:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $80, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq 48(%rsp), %rdi
movl $0, %esi
call gettimeofday@PLT
movl 48(%rsp), %edi
call srand@PLT
movl $100, %edi
call _Z8genInputi
movq %rax, %r12
movl $100, %edi
call _Z8genInputi
movq %rax, %rbp
movl $400, %edi
call malloc@PLT
movq %rax, %rbx
movq %rsp, %rdi
movl $400, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $400, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $400, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $400, %edx
movq %r12, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $400, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $64, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L29
.L26:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movl $400, %edx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movl $100, %ecx
movq %rbx, %rdx
movq %rbp, %rsi
movq %r12, %rdi
call _Z6verifyPfS_S_i
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L30
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
movl $100, %ecx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z30__device_stub__Z6vecAddPfS_S_iPfS_S_i
jmp .L26
.L30:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2070:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z6vecAddPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2100:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z6vecAddPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2100:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <string.h>
#define DATA_LENGTH 100
#define CUDA_CALL(X) X; // {if(cudaError == X){printf("Error Calling %s at line %s\n", #X, __LINE__);}}
float * genInput(int l);
void verify(float *a, float *b, float *c, int l);
/* Divergence micro-benchmark kernel: both sides of every branch perform
   the identical add, so the result never depends on which path a thread
   takes — the branches exist only to exercise warp-divergence patterns.
   All threads write the same out[i] slots (benign: every writer stores
   the same value). `len` is unused; the bound is compile-time DATA_LENGTH. */
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
  int i=0;
  int tid = threadIdx.x;
  for(i=0; i<DATA_LENGTH; i++){
    // first half of first warp and second half of 2nd warp
    if(tid < 16 || tid > 47)
      out[i] = in1[i]+in2[i];
    else
      out[i] = in1[i]+in2[i];
  }
  for(i=0; i<DATA_LENGTH; i++){
    // only even threads not compactable
    if(threadIdx.x%2 == 0 )
      out[i] = in1[i]+in2[i];
    else
      out[i] = in1[i]+in2[i];
  }
  out[0] = in1[0] + in2[0];
}
/* Host driver: build two random vectors, add them on the GPU with one
   64-thread block, copy the result back and check it against a CPU
   reference. CUDA_CALL discards the status — the macro expands to the
   bare call (see its #define above). */
int main(int argc, char **argv) {
  int inputLength;
  float *hostInput1;
  float *hostInput2;
  float *hostOutput;
  float *deviceInput1;
  float *deviceInput2;
  float *deviceOutput;
  struct timeval t;
  gettimeofday(&t, NULL);
  srand(t.tv_sec);                 // seed RNG from wall-clock seconds
  inputLength = DATA_LENGTH;
  hostInput1 = genInput(inputLength);
  hostInput2 = genInput(inputLength);
  hostOutput = ( float * )malloc(inputLength * sizeof(float));
  //@@ Allocate GPU memory here
  CUDA_CALL(cudaMalloc((void**)&deviceInput1, inputLength*sizeof(float)));
  CUDA_CALL(cudaMalloc((void**)&deviceInput2, inputLength*sizeof(float)));
  CUDA_CALL(cudaMalloc((void**)&deviceOutput, inputLength*sizeof(float)));
  //@@ Copy memory to the GPU here
  CUDA_CALL(cudaMemcpy(deviceInput1, hostInput1, sizeof(float)*inputLength, cudaMemcpyHostToDevice));
  CUDA_CALL(cudaMemcpy(deviceInput2, hostInput2, sizeof(float)*inputLength, cudaMemcpyHostToDevice));
  //@@ Initialize the grid and block dimensions here
  dim3 numBlocks(1,1,1);
  //dim3 numThreads(ThreadsPerBlock,1,1);
  dim3 numThreads(64,1,1);         // two warps: enough to show both divergence patterns
  //@@ Launch the GPU Kernel here
  vecAdd<<<numBlocks, numThreads>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
  cudaDeviceSynchronize();
  //@@ Copy the GPU memory back to the CPU here
  CUDA_CALL(cudaMemcpy(hostOutput, deviceOutput, inputLength*sizeof(float), cudaMemcpyDeviceToHost));
  //@@ Free the GPU memory here
  CUDA_CALL(cudaFree(deviceInput1));
  CUDA_CALL(cudaFree(deviceInput2));
  CUDA_CALL(cudaFree(deviceOutput));
  verify(hostInput1, hostInput2, hostOutput, inputLength);
  free(hostInput1);
  free(hostInput2);
  free(hostOutput);
  return 0;
}
/* Allocate and fill an array of l floats with rand()/rand() ratios.
   NOTE(review): malloc's result is unchecked, and a zero from the second
   rand() yields +inf — tolerated in this test harness. */
float * genInput(int l)
{
  int i;
  float * arr = (float*)malloc(l*sizeof(float));
  for(i=0; i<l; i++){
    arr[i] = rand();            // numerator, stored then re-read as float
    arr[i] = arr[i]/rand();
  }
  return arr;
}
void verify(float *a, float *b, float *c, int l)
{
char buff1[50] = {0};
char buff2[50] = {0};
int i;
for(i=0; i<l; i++){
float d = a[i]+b[i];
sprintf(buff1, "%1.8f", d);
sprintf(buff2, "%1.8f", c[i]);
if(strcmp(buff1, buff2) != 0){
printf("ERROR at index %d, Exp %1.8f Got %1.8f\n",i,d,c[i]);
break;
}
}
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <string.h>
#define DATA_LENGTH 100
#define CUDA_CALL(X) X; // {if(cudaError == X){printf("Error Calling %s at line %s\n", #X, __LINE__);}}
float * genInput(int l);
void verify(float *a, float *b, float *c, int l);
/* Divergence micro-benchmark kernel (HIP port): both sides of every
   branch perform the identical add, so the result never depends on the
   path taken — the branches exist only to exercise divergence patterns.
   All threads write the same out[i] slots (benign: every writer stores
   the same value). `len` is unused; the bound is compile-time DATA_LENGTH. */
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
  int i=0;
  int tid = threadIdx.x;
  for(i=0; i<DATA_LENGTH; i++){
    // first half of first warp and second half of 2nd warp
    if(tid < 16 || tid > 47)
      out[i] = in1[i]+in2[i];
    else
      out[i] = in1[i]+in2[i];
  }
  for(i=0; i<DATA_LENGTH; i++){
    // only even threads not compactable
    if(threadIdx.x%2 == 0 )
      out[i] = in1[i]+in2[i];
    else
      out[i] = in1[i]+in2[i];
  }
  out[0] = in1[0] + in2[0];
}
/* Host driver (HIP port): build two random vectors, add them on the GPU
   with one 64-thread block, copy the result back and check against a CPU
   reference. CUDA_CALL discards the status — the macro expands to the
   bare call (see its #define above). */
int main(int argc, char **argv) {
  int inputLength;
  float *hostInput1;
  float *hostInput2;
  float *hostOutput;
  float *deviceInput1;
  float *deviceInput2;
  float *deviceOutput;
  struct timeval t;
  gettimeofday(&t, NULL);
  srand(t.tv_sec);                 // seed RNG from wall-clock seconds
  inputLength = DATA_LENGTH;
  hostInput1 = genInput(inputLength);
  hostInput2 = genInput(inputLength);
  hostOutput = ( float * )malloc(inputLength * sizeof(float));
  //@@ Allocate GPU memory here
  CUDA_CALL(hipMalloc((void**)&deviceInput1, inputLength*sizeof(float)));
  CUDA_CALL(hipMalloc((void**)&deviceInput2, inputLength*sizeof(float)));
  CUDA_CALL(hipMalloc((void**)&deviceOutput, inputLength*sizeof(float)));
  //@@ Copy memory to the GPU here
  CUDA_CALL(hipMemcpy(deviceInput1, hostInput1, sizeof(float)*inputLength, hipMemcpyHostToDevice));
  CUDA_CALL(hipMemcpy(deviceInput2, hostInput2, sizeof(float)*inputLength, hipMemcpyHostToDevice));
  //@@ Initialize the grid and block dimensions here
  dim3 numBlocks(1,1,1);
  //dim3 numThreads(ThreadsPerBlock,1,1);
  dim3 numThreads(64,1,1);         // two warps' worth of threads on NVIDIA-style sizing
  //@@ Launch the GPU Kernel here
  vecAdd<<<numBlocks, numThreads>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
  hipDeviceSynchronize();
  //@@ Copy the GPU memory back to the CPU here
  CUDA_CALL(hipMemcpy(hostOutput, deviceOutput, inputLength*sizeof(float), hipMemcpyDeviceToHost));
  //@@ Free the GPU memory here
  CUDA_CALL(hipFree(deviceInput1));
  CUDA_CALL(hipFree(deviceInput2));
  CUDA_CALL(hipFree(deviceOutput));
  verify(hostInput1, hostInput2, hostOutput, inputLength);
  free(hostInput1);
  free(hostInput2);
  free(hostOutput);
  return 0;
}
/* Allocate and fill an array of l floats with rand()/rand() ratios.
   NOTE(review): malloc's result is unchecked, and a zero from the second
   rand() yields +inf — tolerated in this test harness. */
float * genInput(int l)
{
  int i;
  float * arr = (float*)malloc(l*sizeof(float));
  for(i=0; i<l; i++){
    arr[i] = rand();            // numerator, stored then re-read as float
    arr[i] = arr[i]/rand();
  }
  return arr;
}
/* Compare c[i] against the CPU reference a[i]+b[i] by formatting both to
   8 decimal places and comparing the strings (tolerates representation
   noise that bitwise equality would flag). Reports and stops at the
   first mismatch; prints nothing on success. */
void verify(float *a, float *b, float *c, int l)
{
  char buff1[50] = {0};
  char buff2[50] = {0};
  int i;
  for(i=0; i<l; i++){
    float d = a[i]+b[i];        // reference value
    sprintf(buff1, "%1.8f", d);
    sprintf(buff2, "%1.8f", c[i]);
    if(strcmp(buff1, buff2) != 0){
      printf("ERROR at index %d, Exp %1.8f Got %1.8f\n",i,d,c[i]);
      break;
    }
  }
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <string.h>
#define DATA_LENGTH 100
#define CUDA_CALL(X) X; // {if(cudaError == X){printf("Error Calling %s at line %s\n", #X, __LINE__);}}
float * genInput(int l);
void verify(float *a, float *b, float *c, int l);
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
int i=0;
int tid = threadIdx.x;
for(i=0; i<DATA_LENGTH; i++){
// first half of first warp and second half of 2nd warp
if(tid < 16 || tid > 47)
out[i] = in1[i]+in2[i];
else
out[i] = in1[i]+in2[i];
}
for(i=0; i<DATA_LENGTH; i++){
// only even threads not compactable
if(threadIdx.x%2 == 0 )
out[i] = in1[i]+in2[i];
else
out[i] = in1[i]+in2[i];
}
out[0] = in1[0] + in2[0];
}
int main(int argc, char **argv) {
int inputLength;
float *hostInput1;
float *hostInput2;
float *hostOutput;
float *deviceInput1;
float *deviceInput2;
float *deviceOutput;
struct timeval t;
gettimeofday(&t, NULL);
srand(t.tv_sec);
inputLength = DATA_LENGTH;
hostInput1 = genInput(inputLength);
hostInput2 = genInput(inputLength);
hostOutput = ( float * )malloc(inputLength * sizeof(float));
//@@ Allocate GPU memory here
CUDA_CALL(hipMalloc((void**)&deviceInput1, inputLength*sizeof(float)));
CUDA_CALL(hipMalloc((void**)&deviceInput2, inputLength*sizeof(float)));
CUDA_CALL(hipMalloc((void**)&deviceOutput, inputLength*sizeof(float)));
//@@ Copy memory to the GPU here
CUDA_CALL(hipMemcpy(deviceInput1, hostInput1, sizeof(float)*inputLength, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(deviceInput2, hostInput2, sizeof(float)*inputLength, hipMemcpyHostToDevice));
//@@ Initialize the grid and block dimensions here
dim3 numBlocks(1,1,1);
//dim3 numThreads(ThreadsPerBlock,1,1);
dim3 numThreads(64,1,1);
//@@ Launch the GPU Kernel here
vecAdd<<<numBlocks, numThreads>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
hipDeviceSynchronize();
//@@ Copy the GPU memory back to the CPU here
CUDA_CALL(hipMemcpy(hostOutput, deviceOutput, inputLength*sizeof(float), hipMemcpyDeviceToHost));
//@@ Free the GPU memory here
CUDA_CALL(hipFree(deviceInput1));
CUDA_CALL(hipFree(deviceInput2));
CUDA_CALL(hipFree(deviceOutput));
verify(hostInput1, hostInput2, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
float * genInput(int l)
{
int i;
float * arr = (float*)malloc(l*sizeof(float));
for(i=0; i<l; i++){
arr[i] = rand();
arr[i] = arr[i]/rand();
}
return arr;
}
void verify(float *a, float *b, float *c, int l)
{
char buff1[50] = {0};
char buff2[50] = {0};
int i;
for(i=0; i<l; i++){
float d = a[i]+b[i];
sprintf(buff1, "%1.8f", d);
sprintf(buff2, "%1.8f", c[i]);
if(strcmp(buff1, buff2) != 0){
printf("ERROR at index %d, Exp %1.8f Got %1.8f\n",i,d,c[i]);
break;
}
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6vecAddPfS_S_i
.globl _Z6vecAddPfS_S_i
.p2align 8
.type _Z6vecAddPfS_S_i,@function
_Z6vecAddPfS_S_i:
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
v_mov_b32_e32 v0, 0
s_mov_b64 s[2:3], 0
.p2align 6
.LBB0_1:
s_waitcnt lgkmcnt(0)
s_add_u32 s8, s4, s2
s_addc_u32 s9, s5, s3
s_add_u32 s10, s6, s2
s_addc_u32 s11, s7, s3
s_clause 0x1
global_load_b32 v1, v0, s[8:9]
global_load_b32 v2, v0, s[10:11]
s_add_u32 s8, s0, s2
s_addc_u32 s9, s1, s3
s_add_u32 s2, s2, 4
s_addc_u32 s3, s3, 0
s_cmpk_lg_i32 s2, 0x190
s_waitcnt vmcnt(0)
v_add_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[8:9]
s_cbranch_scc1 .LBB0_1
v_mov_b32_e32 v0, 0
s_mov_b64 s[2:3], 0
.p2align 6
.LBB0_3:
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s8, s4, s2
s_addc_u32 s9, s5, s3
s_add_u32 s10, s6, s2
s_addc_u32 s11, s7, s3
s_clause 0x1
global_load_b32 v1, v0, s[8:9]
global_load_b32 v2, v0, s[10:11]
s_add_u32 s8, s0, s2
s_addc_u32 s9, s1, s3
s_add_u32 s2, s2, 4
s_addc_u32 s3, s3, 0
s_cmpk_lg_i32 s2, 0x190
s_waitcnt vmcnt(0)
v_add_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[8:9]
s_cbranch_scc1 .LBB0_3
v_mov_b32_e32 v0, 0
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6vecAddPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 28
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 12
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6vecAddPfS_S_i, .Lfunc_end0-_Z6vecAddPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 28
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6vecAddPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 12
.sgpr_spill_count: 0
.symbol: _Z6vecAddPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <string.h>
#define DATA_LENGTH 100
#define CUDA_CALL(X) X; // {if(cudaError == X){printf("Error Calling %s at line %s\n", #X, __LINE__);}}
float * genInput(int l);
void verify(float *a, float *b, float *c, int l);
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
int i=0;
int tid = threadIdx.x;
for(i=0; i<DATA_LENGTH; i++){
// first half of first warp and second half of 2nd warp
if(tid < 16 || tid > 47)
out[i] = in1[i]+in2[i];
else
out[i] = in1[i]+in2[i];
}
for(i=0; i<DATA_LENGTH; i++){
// only even threads not compactable
if(threadIdx.x%2 == 0 )
out[i] = in1[i]+in2[i];
else
out[i] = in1[i]+in2[i];
}
out[0] = in1[0] + in2[0];
}
int main(int argc, char **argv) {
int inputLength;
float *hostInput1;
float *hostInput2;
float *hostOutput;
float *deviceInput1;
float *deviceInput2;
float *deviceOutput;
struct timeval t;
gettimeofday(&t, NULL);
srand(t.tv_sec);
inputLength = DATA_LENGTH;
hostInput1 = genInput(inputLength);
hostInput2 = genInput(inputLength);
hostOutput = ( float * )malloc(inputLength * sizeof(float));
//@@ Allocate GPU memory here
CUDA_CALL(hipMalloc((void**)&deviceInput1, inputLength*sizeof(float)));
CUDA_CALL(hipMalloc((void**)&deviceInput2, inputLength*sizeof(float)));
CUDA_CALL(hipMalloc((void**)&deviceOutput, inputLength*sizeof(float)));
//@@ Copy memory to the GPU here
CUDA_CALL(hipMemcpy(deviceInput1, hostInput1, sizeof(float)*inputLength, hipMemcpyHostToDevice));
CUDA_CALL(hipMemcpy(deviceInput2, hostInput2, sizeof(float)*inputLength, hipMemcpyHostToDevice));
//@@ Initialize the grid and block dimensions here
dim3 numBlocks(1,1,1);
//dim3 numThreads(ThreadsPerBlock,1,1);
dim3 numThreads(64,1,1);
//@@ Launch the GPU Kernel here
vecAdd<<<numBlocks, numThreads>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
hipDeviceSynchronize();
//@@ Copy the GPU memory back to the CPU here
CUDA_CALL(hipMemcpy(hostOutput, deviceOutput, inputLength*sizeof(float), hipMemcpyDeviceToHost));
//@@ Free the GPU memory here
CUDA_CALL(hipFree(deviceInput1));
CUDA_CALL(hipFree(deviceInput2));
CUDA_CALL(hipFree(deviceOutput));
verify(hostInput1, hostInput2, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
float * genInput(int l)
{
int i;
float * arr = (float*)malloc(l*sizeof(float));
for(i=0; i<l; i++){
arr[i] = rand();
arr[i] = arr[i]/rand();
}
return arr;
}
void verify(float *a, float *b, float *c, int l)
{
char buff1[50] = {0};
char buff2[50] = {0};
int i;
for(i=0; i<l; i++){
float d = a[i]+b[i];
sprintf(buff1, "%1.8f", d);
sprintf(buff2, "%1.8f", c[i]);
if(strcmp(buff1, buff2) != 0){
printf("ERROR at index %d, Exp %1.8f Got %1.8f\n",i,d,c[i]);
break;
}
}
} | .text
.file "divergent_kernel.hip"
.globl _Z21__device_stub__vecAddPfS_S_i # -- Begin function _Z21__device_stub__vecAddPfS_S_i
.p2align 4, 0x90
.type _Z21__device_stub__vecAddPfS_S_i,@function
_Z21__device_stub__vecAddPfS_S_i: # @_Z21__device_stub__vecAddPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6vecAddPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z21__device_stub__vecAddPfS_S_i, .Lfunc_end0-_Z21__device_stub__vecAddPfS_S_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $232, %rsp
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
xorl %r14d, %r14d
leaq 216(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movl 216(%rsp), %edi
callq srand
movl $400, %edi # imm = 0x190
callq malloc
movq %rax, %rbx
.p2align 4, 0x90
.LBB1_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rsp) # 4-byte Spill
movss %xmm0, (%rbx,%r14,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss (%rsp), %xmm1 # 4-byte Reload
# xmm1 = mem[0],zero,zero,zero
divss %xmm0, %xmm1
movss %xmm1, (%rbx,%r14,4)
incq %r14
cmpq $100, %r14
jne .LBB1_1
# %bb.2: # %_Z8genInputi.exit
movl $400, %edi # imm = 0x190
callq malloc
movq %rax, %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_3: # %.lr.ph.i25
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rsp) # 4-byte Spill
movss %xmm0, (%r14,%r15,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss (%rsp), %xmm1 # 4-byte Reload
# xmm1 = mem[0],zero,zero,zero
divss %xmm0, %xmm1
movss %xmm1, (%r14,%r15,4)
incq %r15
cmpq $100, %r15
jne .LBB1_3
# %bb.4: # %_Z8genInputi.exit29
movl $400, %edi # imm = 0x190
callq malloc
movq %rax, %r15
leaq 24(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hipMalloc
leaq 16(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hipMalloc
leaq 8(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hipMalloc
movq 24(%rsp), %rdi
movl $400, %edx # imm = 0x190
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movl $400, %edx # imm = 0x190
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 63(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_6
# %bb.5:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
movl $100, 36(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 36(%rsp), %rax
movq %rax, 120(%rsp)
leaq 160(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 160(%rsp), %rsi
movl 168(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z6vecAddPfS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_6:
callq hipDeviceSynchronize
movq 8(%rsp), %rsi
movl $400, %edx # imm = 0x190
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorps %xmm0, %xmm0
movaps %xmm0, 128(%rsp)
movaps %xmm0, 112(%rsp)
movaps %xmm0, 96(%rsp)
movw $0, 144(%rsp)
movaps %xmm0, 192(%rsp)
movaps %xmm0, 176(%rsp)
movaps %xmm0, 160(%rsp)
movw $0, 208(%rsp)
xorl %r12d, %r12d
leaq 96(%rsp), %r13
leaq 160(%rsp), %rbp
.p2align 4, 0x90
.LBB1_7: # %.lr.ph.i30
# =>This Inner Loop Header: Depth=1
movss (%rbx,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%r14,%r12,4), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str, %esi
movq %r13, %rdi
movsd %xmm0, (%rsp) # 8-byte Spill
movb $1, %al
callq sprintf
movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %esi
movq %rbp, %rdi
movb $1, %al
callq sprintf
movq %r13, %rdi
movq %rbp, %rsi
callq strcmp
testl %eax, %eax
jne .LBB1_8
# %bb.9: # %.critedge.i
# in Loop: Header=BB1_7 Depth=1
incq %r12
cmpq $100, %r12
jne .LBB1_7
jmp .LBB1_10
.LBB1_8:
movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm1
movl $.L.str.1, %edi
movl %r12d, %esi
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movb $2, %al
callq printf
.LBB1_10: # %_Z6verifyPfS_S_i.exit
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
xorl %eax, %eax
addq $232, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.globl _Z8genInputi # -- Begin function _Z8genInputi
.p2align 4, 0x90
.type _Z8genInputi,@function
_Z8genInputi: # @_Z8genInputi
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edi, %ebp
movslq %edi, %r14
leaq (,%r14,4), %rdi
callq malloc
movq %rax, %rbx
testl %r14d, %r14d
jle .LBB2_3
# %bb.1: # %.lr.ph.preheader
movl %ebp, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 4(%rsp) # 4-byte Spill
movss %xmm0, (%rbx,%r15,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss 4(%rsp), %xmm1 # 4-byte Reload
# xmm1 = mem[0],zero,zero,zero
divss %xmm0, %xmm1
movss %xmm1, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB2_2
.LBB2_3: # %._crit_edge
movq %rbx, %rax
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z8genInputi, .Lfunc_end2-_Z8genInputi
.cfi_endproc
# -- End function
.globl _Z6verifyPfS_S_i # -- Begin function _Z6verifyPfS_S_i
.p2align 4, 0x90
.type _Z6verifyPfS_S_i,@function
_Z6verifyPfS_S_i: # @_Z6verifyPfS_S_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $136, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, 8(%rsp) # 8-byte Spill
xorps %xmm0, %xmm0
movaps %xmm0, 112(%rsp)
movaps %xmm0, 96(%rsp)
movaps %xmm0, 80(%rsp)
movw $0, 128(%rsp)
movaps %xmm0, 48(%rsp)
movaps %xmm0, 32(%rsp)
movaps %xmm0, 16(%rsp)
movw $0, 64(%rsp)
testl %ecx, %ecx
jle .LBB3_4
# %bb.1: # %.lr.ph.preheader
movq %rdx, %rbx
movq %rdi, %r15
movl %ecx, %r14d
xorl %r12d, %r12d
leaq 80(%rsp), %r13
leaq 16(%rsp), %rbp
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movq 8(%rsp), %rax # 8-byte Reload
addss (%rax,%r12,4), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str, %esi
movq %r13, %rdi
movsd %xmm0, (%rsp) # 8-byte Spill
movb $1, %al
callq sprintf
movss (%rbx,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %esi
movq %rbp, %rdi
movb $1, %al
callq sprintf
movq %r13, %rdi
movq %rbp, %rsi
callq strcmp
testl %eax, %eax
jne .LBB3_5
# %bb.3: # %.critedge
# in Loop: Header=BB3_2 Depth=1
incq %r12
cmpq %r12, %r14
jne .LBB3_2
.LBB3_4: # %.loopexit
addq $136, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB3_5:
.cfi_def_cfa_offset 192
movss (%rbx,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm1
movl $.L.str.1, %edi
movl %r12d, %esi
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movb $2, %al
addq $136, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp printf # TAILCALL
.Lfunc_end3:
.size _Z6verifyPfS_S_i, .Lfunc_end3-_Z6verifyPfS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6vecAddPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6vecAddPfS_S_i,@object # @_Z6vecAddPfS_S_i
.section .rodata,"a",@progbits
.globl _Z6vecAddPfS_S_i
.p2align 3, 0x0
_Z6vecAddPfS_S_i:
.quad _Z21__device_stub__vecAddPfS_S_i
.size _Z6vecAddPfS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%1.8f"
.size .L.str, 6
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "ERROR at index %d, Exp %1.8f Got %1.8f\n"
.size .L.str.1, 40
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6vecAddPfS_S_i"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__vecAddPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6vecAddPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00136a3a_00000000-6_divergent_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2075:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2075:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z8genInputi
.type _Z8genInputi, @function
_Z8genInputi:
.LFB2071:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movl %edi, %ebx
movslq %edi, %r12
salq $2, %r12
movq %r12, %rdi
call malloc@PLT
movq %rax, %r13
testl %ebx, %ebx
jle .L3
movq %rax, %rbx
addq %rax, %r12
.L5:
call rand@PLT
pxor %xmm2, %xmm2
cvtsi2ssl %eax, %xmm2
movd %xmm2, %ebp
movss %xmm2, (%rbx)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movd %ebp, %xmm1
divss %xmm0, %xmm1
movss %xmm1, (%rbx)
addq $4, %rbx
cmpq %r12, %rbx
jne .L5
.L3:
movq %r13, %rax
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2071:
.size _Z8genInputi, .-_Z8genInputi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%1.8f"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "ERROR at index %d, Exp %1.8f Got %1.8f\n"
.text
.globl _Z6verifyPfS_S_i
.type _Z6verifyPfS_S_i, @function
_Z6verifyPfS_S_i:
.LFB2072:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $168, %rsp
.cfi_def_cfa_offset 224
movq %rsi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
movq $0, 32(%rsp)
movq $0, 40(%rsp)
movq $0, 48(%rsp)
movq $0, 56(%rsp)
movq $0, 64(%rsp)
movq $0, 72(%rsp)
movw $0, 80(%rsp)
movq $0, 96(%rsp)
movq $0, 104(%rsp)
movq $0, 112(%rsp)
movq $0, 120(%rsp)
movq $0, 128(%rsp)
movq $0, 136(%rsp)
movw $0, 144(%rsp)
testl %ecx, %ecx
jle .L8
movq %rdi, %r15
movq %rdx, %rbp
movslq %ecx, %rax
movq %rax, 24(%rsp)
movl $0, %ebx
leaq .LC0(%rip), %r14
.L11:
movss (%r15,%rbx,4), %xmm0
movq 8(%rsp), %rax
addss (%rax,%rbx,4), %xmm0
pxor %xmm2, %xmm2
cvtss2sd %xmm0, %xmm2
movsd %xmm2, 16(%rsp)
leaq 32(%rsp), %r12
movapd %xmm2, %xmm0
movq %r14, %rcx
movl $50, %edx
movl $2, %esi
movq %r12, %rdi
movl $1, %eax
call __sprintf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 0(%rbp), %xmm0
leaq 96(%rsp), %r13
movq %r14, %rcx
movl $50, %edx
movl $2, %esi
movq %r13, %rdi
movl $1, %eax
call __sprintf_chk@PLT
movq %r13, %rsi
movq %r12, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L15
addq $1, %rbx
addq $4, %rbp
movq 24(%rsp), %rax
cmpq %rax, %rbx
jne .L11
jmp .L8
.L15:
pxor %xmm1, %xmm1
cvtss2sd 0(%rbp), %xmm1
movsd 16(%rsp), %xmm0
movl %ebx, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
.L8:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2072:
.size _Z6verifyPfS_S_i, .-_Z6verifyPfS_S_i
.globl _Z30__device_stub__Z6vecAddPfS_S_iPfS_S_i
.type _Z30__device_stub__Z6vecAddPfS_S_iPfS_S_i, @function
_Z30__device_stub__Z6vecAddPfS_S_iPfS_S_i:
.LFB2097:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L21
.L17:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L22
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6vecAddPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L17
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2097:
.size _Z30__device_stub__Z6vecAddPfS_S_iPfS_S_i, .-_Z30__device_stub__Z6vecAddPfS_S_iPfS_S_i
.globl _Z6vecAddPfS_S_i
.type _Z6vecAddPfS_S_i, @function
_Z6vecAddPfS_S_i:
.LFB2098:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z6vecAddPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2098:
.size _Z6vecAddPfS_S_i, .-_Z6vecAddPfS_S_i
.globl main
.type main, @function
main:
.LFB2070:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $80, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq 48(%rsp), %rdi
movl $0, %esi
call gettimeofday@PLT
movl 48(%rsp), %edi
call srand@PLT
movl $100, %edi
call _Z8genInputi
movq %rax, %r12
movl $100, %edi
call _Z8genInputi
movq %rax, %rbp
movl $400, %edi
call malloc@PLT
movq %rax, %rbx
movq %rsp, %rdi
movl $400, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $400, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $400, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $400, %edx
movq %r12, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $400, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $64, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L29
.L26:
call cudaDeviceSynchronize@PLT
movl $2, %ecx
movl $400, %edx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movl $100, %ecx
movq %rbx, %rdx
movq %rbp, %rsi
movq %r12, %rdi
call _Z6verifyPfS_S_i
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L30
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
movl $100, %ecx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z30__device_stub__Z6vecAddPfS_S_iPfS_S_i
jmp .L26
.L30:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2070:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z6vecAddPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2100:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z6vecAddPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2100:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "divergent_kernel.hip"
.globl _Z21__device_stub__vecAddPfS_S_i # -- Begin function _Z21__device_stub__vecAddPfS_S_i
.p2align 4, 0x90
.type _Z21__device_stub__vecAddPfS_S_i,@function
_Z21__device_stub__vecAddPfS_S_i: # @_Z21__device_stub__vecAddPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6vecAddPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z21__device_stub__vecAddPfS_S_i, .Lfunc_end0-_Z21__device_stub__vecAddPfS_S_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $232, %rsp
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
xorl %r14d, %r14d
leaq 216(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movl 216(%rsp), %edi
callq srand
movl $400, %edi # imm = 0x190
callq malloc
movq %rax, %rbx
.p2align 4, 0x90
.LBB1_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rsp) # 4-byte Spill
movss %xmm0, (%rbx,%r14,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss (%rsp), %xmm1 # 4-byte Reload
# xmm1 = mem[0],zero,zero,zero
divss %xmm0, %xmm1
movss %xmm1, (%rbx,%r14,4)
incq %r14
cmpq $100, %r14
jne .LBB1_1
# %bb.2: # %_Z8genInputi.exit
movl $400, %edi # imm = 0x190
callq malloc
movq %rax, %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_3: # %.lr.ph.i25
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rsp) # 4-byte Spill
movss %xmm0, (%r14,%r15,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss (%rsp), %xmm1 # 4-byte Reload
# xmm1 = mem[0],zero,zero,zero
divss %xmm0, %xmm1
movss %xmm1, (%r14,%r15,4)
incq %r15
cmpq $100, %r15
jne .LBB1_3
# %bb.4: # %_Z8genInputi.exit29
movl $400, %edi # imm = 0x190
callq malloc
movq %rax, %r15
leaq 24(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hipMalloc
leaq 16(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hipMalloc
leaq 8(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hipMalloc
movq 24(%rsp), %rdi
movl $400, %edx # imm = 0x190
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movl $400, %edx # imm = 0x190
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 63(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_6
# %bb.5:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
movl $100, 36(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 36(%rsp), %rax
movq %rax, 120(%rsp)
leaq 160(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 160(%rsp), %rsi
movl 168(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z6vecAddPfS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_6:
callq hipDeviceSynchronize
movq 8(%rsp), %rsi
movl $400, %edx # imm = 0x190
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorps %xmm0, %xmm0
movaps %xmm0, 128(%rsp)
movaps %xmm0, 112(%rsp)
movaps %xmm0, 96(%rsp)
movw $0, 144(%rsp)
movaps %xmm0, 192(%rsp)
movaps %xmm0, 176(%rsp)
movaps %xmm0, 160(%rsp)
movw $0, 208(%rsp)
xorl %r12d, %r12d
leaq 96(%rsp), %r13
leaq 160(%rsp), %rbp
.p2align 4, 0x90
.LBB1_7: # %.lr.ph.i30
# =>This Inner Loop Header: Depth=1
movss (%rbx,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
addss (%r14,%r12,4), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str, %esi
movq %r13, %rdi
movsd %xmm0, (%rsp) # 8-byte Spill
movb $1, %al
callq sprintf
movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %esi
movq %rbp, %rdi
movb $1, %al
callq sprintf
movq %r13, %rdi
movq %rbp, %rsi
callq strcmp
testl %eax, %eax
jne .LBB1_8
# %bb.9: # %.critedge.i
# in Loop: Header=BB1_7 Depth=1
incq %r12
cmpq $100, %r12
jne .LBB1_7
jmp .LBB1_10
.LBB1_8:
movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm1
movl $.L.str.1, %edi
movl %r12d, %esi
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movb $2, %al
callq printf
.LBB1_10: # %_Z6verifyPfS_S_i.exit
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
xorl %eax, %eax
addq $232, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.globl _Z8genInputi # -- Begin function _Z8genInputi
.p2align 4, 0x90
.type _Z8genInputi,@function
_Z8genInputi: # @_Z8genInputi
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edi, %ebp
movslq %edi, %r14
leaq (,%r14,4), %rdi
callq malloc
movq %rax, %rbx
testl %r14d, %r14d
jle .LBB2_3
# %bb.1: # %.lr.ph.preheader
movl %ebp, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, 4(%rsp) # 4-byte Spill
movss %xmm0, (%rbx,%r15,4)
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss 4(%rsp), %xmm1 # 4-byte Reload
# xmm1 = mem[0],zero,zero,zero
divss %xmm0, %xmm1
movss %xmm1, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB2_2
.LBB2_3: # %._crit_edge
movq %rbx, %rax
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z8genInputi, .Lfunc_end2-_Z8genInputi
.cfi_endproc
# -- End function
.globl _Z6verifyPfS_S_i # -- Begin function _Z6verifyPfS_S_i
.p2align 4, 0x90
.type _Z6verifyPfS_S_i,@function
_Z6verifyPfS_S_i: # @_Z6verifyPfS_S_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $136, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, 8(%rsp) # 8-byte Spill
xorps %xmm0, %xmm0
movaps %xmm0, 112(%rsp)
movaps %xmm0, 96(%rsp)
movaps %xmm0, 80(%rsp)
movw $0, 128(%rsp)
movaps %xmm0, 48(%rsp)
movaps %xmm0, 32(%rsp)
movaps %xmm0, 16(%rsp)
movw $0, 64(%rsp)
testl %ecx, %ecx
jle .LBB3_4
# %bb.1: # %.lr.ph.preheader
movq %rdx, %rbx
movq %rdi, %r15
movl %ecx, %r14d
xorl %r12d, %r12d
leaq 80(%rsp), %r13
leaq 16(%rsp), %rbp
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%r15,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movq 8(%rsp), %rax # 8-byte Reload
addss (%rax,%r12,4), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str, %esi
movq %r13, %rdi
movsd %xmm0, (%rsp) # 8-byte Spill
movb $1, %al
callq sprintf
movss (%rbx,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %esi
movq %rbp, %rdi
movb $1, %al
callq sprintf
movq %r13, %rdi
movq %rbp, %rsi
callq strcmp
testl %eax, %eax
jne .LBB3_5
# %bb.3: # %.critedge
# in Loop: Header=BB3_2 Depth=1
incq %r12
cmpq %r12, %r14
jne .LBB3_2
.LBB3_4: # %.loopexit
addq $136, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB3_5:
.cfi_def_cfa_offset 192
movss (%rbx,%r12,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm1
movl $.L.str.1, %edi
movl %r12d, %esi
movsd (%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
movb $2, %al
addq $136, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp printf # TAILCALL
.Lfunc_end3:
.size _Z6verifyPfS_S_i, .Lfunc_end3-_Z6verifyPfS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6vecAddPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6vecAddPfS_S_i,@object # @_Z6vecAddPfS_S_i
.section .rodata,"a",@progbits
.globl _Z6vecAddPfS_S_i
.p2align 3, 0x0
_Z6vecAddPfS_S_i:
.quad _Z21__device_stub__vecAddPfS_S_i
.size _Z6vecAddPfS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%1.8f"
.size .L.str, 6
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "ERROR at index %d, Exp %1.8f Got %1.8f\n"
.size .L.str.1, 40
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6vecAddPfS_S_i"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__vecAddPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6vecAddPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
* cuda-udf.cu
*
* Created on: 2019年4月10日
* Author: imdb
*/
#define inputD 16
// X_in: 4 x 1
__device__ inline float perceptron(const char* X_in) {
int* X = (int*)X_in;
int W[16];
int B = 1.0;
// W = (float*)W_in;
// B = (float*)B_in;
int result = 0;
for (int k = 0; k < 16; ++k) {
result = (result + (W[k] * X[k]));
}
// return 1;
return (result + B);
}
// input dimension: 4 x 1
__device__ inline float l2Distance(const char* X_in, const char* Y_in) {
int n = 4;
int* X = (int*)X_in;
int* Y = (int*)Y_in;
int tensor1[inputD];
int tensor_red[1];
for (int ax0 = 0; ax0 < n; ++ax0) {
(( int*)tensor1)[ax0] = (X[ax0] - Y[ax0]);
}
for (int ax01 = 0; ax01 < n; ++ax01) {
(( int*)tensor1)[ax01] = powf((( int*)tensor1)[ax01], 2.000000e+00f);
}
tensor_red[0] = 0.000000e+00f;
for (int k0 = 0; k0 < n; ++k0) {
tensor_red[0] = (tensor_red[0] + (( int*)tensor1)[k0]);
}
return (tensor_red[0] / n);
}
// D = 4
// N = 20
// Points: N * D
// Input: 1 * D
// Find the index of $(Input_in)'s nearest neighbor in $(Points_in)
// $(Input_in) is predefined;
__device__ inline int nearestNeighbour(const char* Points_in )
{
const int D = inputD;
const int N = 20;
int* Points = (int*) Points_in;
int Input[inputD];
int tensor_red_red_temp_v0[1];
int tensor_red_red_temp_v1[1];
int tensor[N * D];
for (int ax0 = 0; ax0 < N; ++ax0)
{
for (int ax1 = 0; ax1 < D; ++ax1)
{
tensor[((ax0 * D) + ax1)] = (Points[((ax0 * D) + ax1)] - Input[ax1]);
}
}
int tensor1[N * D];
for (int ax01 = 0; ax01 < N; ++ax01)
{
for (int ax11 = 0; ax11 < D; ++ax11)
{
((float *)tensor1)[((ax01 * D) + ax11)]=powf(tensor[((ax01 * D) + ax11)], 2.000000e+00f);
}
}
int tensor_red[N];
for (int ax02 = 0; ax02 < N; ++ax02)
{
((int *)tensor_red)[ax02] = 0.000000e+00f;
for (int k1 = 0; k1 < D; ++k1)
{
((int *)tensor_red)[ax02] = (((int *)tensor_red)[ax02] + ((int *)tensor1)[((ax02 * D) + k1)]);
}
}
tensor_red_red_temp_v0[0] = -1;
// tensor_red_red_temp_v1[0] = 3.402823e+38f;
for (int k0 = 0; k0 < N; ++k0)
{
tensor_red_red_temp_v0[0] = ((((int *)tensor_red)[k0] < tensor_red_red_temp_v1[0]) ? k0 : tensor_red_red_temp_v0[0]);
tensor_red_red_temp_v1[0] = ((((int *)tensor_red)[k0] < tensor_red_red_temp_v1[0]) ? ((float *)tensor_red)[k0] : tensor_red_red_temp_v1[0]);
}
return tensor_red_red_temp_v0[0];
}
// X_in: 4 x 1
__device__ inline int logisticRegression(const char* X_in) {
int* X = (int*)X_in;
int W[inputD] ;
int B = 1;
int tmp = 0;
int compute = 1.00;
for (int k = 0; k < inputD; ++k) {
tmp = (tmp + (W[k] * X[k]));
}
tmp = (tmp + B);
return compute / (compute + expf(tmp));
}
__device__ inline int correlation(const char *X_in, const char *Y_in)
{
int *X = (int *)X_in;
int *Y = (int *)Y_in;
int X_red[1];
int tensor1[16];
int Y_red[1];
int tensor2[16];
int tensor_red[1];
int tensor_red1[1];
int tensor_red2[1];
X_red[0] = 0.000000e+00f;
for (int k1 = 0; k1 < 16; ++k1)
{
X_red[0] = (X_red[0] + X[k1]);
}
X_red[0] = (X_red[0] * 6.250000e-02f);
for (int ax1 = 0; ax1 < 16; ++ax1)
{
tensor1[ax1] = (X[ax1] - X_red[0]);
}
Y_red[0] = 0.000000e+00f;
for (int k11 = 0; k11 < 16; ++k11)
{
Y_red[0] = (Y_red[0] + Y[k11]);
}
Y_red[0] = (Y_red[0] * 6.250000e-02f);
for (int ax11 = 0; ax11 < 16; ++ax11)
{
tensor2[ax11] = (Y[ax11] - Y_red[0]);
}
for (int ax12 = 0; ax12 < 16; ++ax12)
{
tensor1[ax12] = (tensor1[ax12] * tensor2[ax12]);
}
tensor_red[0] = 0.000000e+00f;
for (int k12 = 0; k12 < 16; ++k12)
{
tensor_red[0] = (tensor_red[0] + tensor1[k12]);
}
for (int ax13 = 0; ax13 < 16; ++ax13)
{
tensor2[ax13] = (X[ax13] - X_red[0]);
}
for (int ax14 = 0; ax14 < 16; ++ax14)
{
tensor2[ax14] = powf(tensor2[ax14], 2.0);
}
tensor_red1[0] = 0.00001e+00f;
for (int k13 = 0; k13 < 16; ++k13)
{
tensor_red1[0] = (tensor_red1[0] + tensor2[k13]);
}
tensor_red1[0] = (tensor_red1[0] * 6.250000e-02f);
for (int ax15 = 0; ax15 < 16; ++ax15)
{
tensor1[ax15] = (Y[ax15] - Y_red[0]);
}
for (int ax16 = 0; ax16 < 16; ++ax16)
{
tensor1[ax16] = powf(tensor1[ax16], 2);
}
tensor_red2[0] = 0.000000e+00f;
// return 0;
for (int k14 = 0; k14 < 16; ++k14)
{
tensor_red2[0] = (tensor_red2[0] + tensor1[k14]);
}
tensor_red2[0] = (tensor_red2[0] * 6);
tensor_red1[0] = (tensor_red1[0] * tensor_red2[0]);
// return 0;
tensor_red1[0] = sqrtf((float)tensor_red1[0]);
tensor_red1[0] = 0.1;
return (tensor_red[0] / tensor_red1[0]);
}
__device__ inline int rayleighQuotient(const char *X_in)
{
int* X = (int*) X_in;
int W[16*17];
int tensor1[16];
int tensor2[1];
int tensor3[1];
for (int ax1 = 0; ax1 < 16; ++ax1)
{
tensor1[ax1] = 0;
for (int k = 0; k < 16; ++k)
{
tensor1[ax1] = (tensor1[ax1] + (X[k] * W[(ax1 + (k * 16))]));
}
}
tensor2[0] = 1;
for (int k1 = 0; k1 < 16; ++k1)
{
tensor2[0] = (tensor2[0] + (tensor1[k1] * X[k1]));
}
tensor3[0] = 1;
for (int k2 = 0; k2 < 16; ++k2)
{
tensor3[0] = (tensor3[0] + (X[k2] * X[k2]));
}
tensor3[0] = 1;
tensor2[0] = 1;
return (tensor2[0] / tensor3[0]);
}
__device__ inline int crossEntrophy(char *P_in, char *Q_in)
{
int* P = (int*) P_in;
int* Q = (int*) Q_in;
int compute[16];
for (int i1 = 0; i1 < 16; ++i1)
{
compute[i1] = logf((float)P[i1]);
}
int tensor = 1;
for (int k = 0; k < 16; ++k)
{
tensor = (tensor + (Q[k] * compute[k]));
}
return 0;
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
* cuda-udf.cu
*
* Created on: 2019年4月10日
* Author: imdb
*/
#define inputD 16
// X_in: 4 x 1
__device__ inline float perceptron(const char* X_in) {
int* X = (int*)X_in;
int W[16];
int B = 1.0;
// W = (float*)W_in;
// B = (float*)B_in;
int result = 0;
for (int k = 0; k < 16; ++k) {
result = (result + (W[k] * X[k]));
}
// return 1;
return (result + B);
}
// input dimension: 4 x 1
__device__ inline float l2Distance(const char* X_in, const char* Y_in) {
int n = 4;
int* X = (int*)X_in;
int* Y = (int*)Y_in;
int tensor1[inputD];
int tensor_red[1];
for (int ax0 = 0; ax0 < n; ++ax0) {
(( int*)tensor1)[ax0] = (X[ax0] - Y[ax0]);
}
for (int ax01 = 0; ax01 < n; ++ax01) {
(( int*)tensor1)[ax01] = powf((( int*)tensor1)[ax01], 2.000000e+00f);
}
tensor_red[0] = 0.000000e+00f;
for (int k0 = 0; k0 < n; ++k0) {
tensor_red[0] = (tensor_red[0] + (( int*)tensor1)[k0]);
}
return (tensor_red[0] / n);
}
// D = 4
// N = 20
// Points: N * D
// Input: 1 * D
// Find the index of $(Input_in)'s nearest neighbor in $(Points_in)
// $(Input_in) is predefined;
__device__ inline int nearestNeighbour(const char* Points_in )
{
const int D = inputD;
const int N = 20;
int* Points = (int*) Points_in;
int Input[inputD];
int tensor_red_red_temp_v0[1];
int tensor_red_red_temp_v1[1];
int tensor[N * D];
for (int ax0 = 0; ax0 < N; ++ax0)
{
for (int ax1 = 0; ax1 < D; ++ax1)
{
tensor[((ax0 * D) + ax1)] = (Points[((ax0 * D) + ax1)] - Input[ax1]);
}
}
int tensor1[N * D];
for (int ax01 = 0; ax01 < N; ++ax01)
{
for (int ax11 = 0; ax11 < D; ++ax11)
{
((float *)tensor1)[((ax01 * D) + ax11)]=powf(tensor[((ax01 * D) + ax11)], 2.000000e+00f);
}
}
int tensor_red[N];
for (int ax02 = 0; ax02 < N; ++ax02)
{
((int *)tensor_red)[ax02] = 0.000000e+00f;
for (int k1 = 0; k1 < D; ++k1)
{
((int *)tensor_red)[ax02] = (((int *)tensor_red)[ax02] + ((int *)tensor1)[((ax02 * D) + k1)]);
}
}
tensor_red_red_temp_v0[0] = -1;
// tensor_red_red_temp_v1[0] = 3.402823e+38f;
for (int k0 = 0; k0 < N; ++k0)
{
tensor_red_red_temp_v0[0] = ((((int *)tensor_red)[k0] < tensor_red_red_temp_v1[0]) ? k0 : tensor_red_red_temp_v0[0]);
tensor_red_red_temp_v1[0] = ((((int *)tensor_red)[k0] < tensor_red_red_temp_v1[0]) ? ((float *)tensor_red)[k0] : tensor_red_red_temp_v1[0]);
}
return tensor_red_red_temp_v0[0];
}
// X_in: 4 x 1
__device__ inline int logisticRegression(const char* X_in) {
int* X = (int*)X_in;
int W[inputD] ;
int B = 1;
int tmp = 0;
int compute = 1.00;
for (int k = 0; k < inputD; ++k) {
tmp = (tmp + (W[k] * X[k]));
}
tmp = (tmp + B);
return compute / (compute + expf(tmp));
}
__device__ inline int correlation(const char *X_in, const char *Y_in)
{
int *X = (int *)X_in;
int *Y = (int *)Y_in;
int X_red[1];
int tensor1[16];
int Y_red[1];
int tensor2[16];
int tensor_red[1];
int tensor_red1[1];
int tensor_red2[1];
X_red[0] = 0.000000e+00f;
for (int k1 = 0; k1 < 16; ++k1)
{
X_red[0] = (X_red[0] + X[k1]);
}
X_red[0] = (X_red[0] * 6.250000e-02f);
for (int ax1 = 0; ax1 < 16; ++ax1)
{
tensor1[ax1] = (X[ax1] - X_red[0]);
}
Y_red[0] = 0.000000e+00f;
for (int k11 = 0; k11 < 16; ++k11)
{
Y_red[0] = (Y_red[0] + Y[k11]);
}
Y_red[0] = (Y_red[0] * 6.250000e-02f);
for (int ax11 = 0; ax11 < 16; ++ax11)
{
tensor2[ax11] = (Y[ax11] - Y_red[0]);
}
for (int ax12 = 0; ax12 < 16; ++ax12)
{
tensor1[ax12] = (tensor1[ax12] * tensor2[ax12]);
}
tensor_red[0] = 0.000000e+00f;
for (int k12 = 0; k12 < 16; ++k12)
{
tensor_red[0] = (tensor_red[0] + tensor1[k12]);
}
for (int ax13 = 0; ax13 < 16; ++ax13)
{
tensor2[ax13] = (X[ax13] - X_red[0]);
}
for (int ax14 = 0; ax14 < 16; ++ax14)
{
tensor2[ax14] = powf(tensor2[ax14], 2.0);
}
tensor_red1[0] = 0.00001e+00f;
for (int k13 = 0; k13 < 16; ++k13)
{
tensor_red1[0] = (tensor_red1[0] + tensor2[k13]);
}
tensor_red1[0] = (tensor_red1[0] * 6.250000e-02f);
for (int ax15 = 0; ax15 < 16; ++ax15)
{
tensor1[ax15] = (Y[ax15] - Y_red[0]);
}
for (int ax16 = 0; ax16 < 16; ++ax16)
{
tensor1[ax16] = powf(tensor1[ax16], 2);
}
tensor_red2[0] = 0.000000e+00f;
// return 0;
for (int k14 = 0; k14 < 16; ++k14)
{
tensor_red2[0] = (tensor_red2[0] + tensor1[k14]);
}
tensor_red2[0] = (tensor_red2[0] * 6);
tensor_red1[0] = (tensor_red1[0] * tensor_red2[0]);
// return 0;
tensor_red1[0] = sqrtf((float)tensor_red1[0]);
tensor_red1[0] = 0.1;
return (tensor_red[0] / tensor_red1[0]);
}
__device__ inline int rayleighQuotient(const char *X_in)
{
int* X = (int*) X_in;
int W[16*17];
int tensor1[16];
int tensor2[1];
int tensor3[1];
for (int ax1 = 0; ax1 < 16; ++ax1)
{
tensor1[ax1] = 0;
for (int k = 0; k < 16; ++k)
{
tensor1[ax1] = (tensor1[ax1] + (X[k] * W[(ax1 + (k * 16))]));
}
}
tensor2[0] = 1;
for (int k1 = 0; k1 < 16; ++k1)
{
tensor2[0] = (tensor2[0] + (tensor1[k1] * X[k1]));
}
tensor3[0] = 1;
for (int k2 = 0; k2 < 16; ++k2)
{
tensor3[0] = (tensor3[0] + (X[k2] * X[k2]));
}
tensor3[0] = 1;
tensor2[0] = 1;
return (tensor2[0] / tensor3[0]);
}
__device__ inline int crossEntrophy(char *P_in, char *Q_in)
{
int* P = (int*) P_in;
int* Q = (int*) Q_in;
int compute[16];
for (int i1 = 0; i1 < 16; ++i1)
{
compute[i1] = logf((float)P[i1]);
}
int tensor = 1;
for (int k = 0; k < 16; ++k)
{
tensor = (tensor + (Q[k] * compute[k]));
}
return 0;
} | .file "tmpxft_000c6468_00000000-6_cuda-udf.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2036:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2036:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
* cuda-udf.cu
*
* Created on: 2019年4月10日
* Author: imdb
*/
#define inputD 16
// X_in: 4 x 1
__device__ inline float perceptron(const char* X_in) {
int* X = (int*)X_in;
int W[16];
int B = 1.0;
// W = (float*)W_in;
// B = (float*)B_in;
int result = 0;
for (int k = 0; k < 16; ++k) {
result = (result + (W[k] * X[k]));
}
// return 1;
return (result + B);
}
// input dimension: 4 x 1
__device__ inline float l2Distance(const char* X_in, const char* Y_in) {
int n = 4;
int* X = (int*)X_in;
int* Y = (int*)Y_in;
int tensor1[inputD];
int tensor_red[1];
for (int ax0 = 0; ax0 < n; ++ax0) {
(( int*)tensor1)[ax0] = (X[ax0] - Y[ax0]);
}
for (int ax01 = 0; ax01 < n; ++ax01) {
(( int*)tensor1)[ax01] = powf((( int*)tensor1)[ax01], 2.000000e+00f);
}
tensor_red[0] = 0.000000e+00f;
for (int k0 = 0; k0 < n; ++k0) {
tensor_red[0] = (tensor_red[0] + (( int*)tensor1)[k0]);
}
return (tensor_red[0] / n);
}
// D = 4
// N = 20
// Points: N * D
// Input: 1 * D
// Find the index of $(Input_in)'s nearest neighbor in $(Points_in)
// $(Input_in) is predefined;
__device__ inline int nearestNeighbour(const char* Points_in )
{
const int D = inputD;
const int N = 20;
int* Points = (int*) Points_in;
int Input[inputD];
int tensor_red_red_temp_v0[1];
int tensor_red_red_temp_v1[1];
int tensor[N * D];
for (int ax0 = 0; ax0 < N; ++ax0)
{
for (int ax1 = 0; ax1 < D; ++ax1)
{
tensor[((ax0 * D) + ax1)] = (Points[((ax0 * D) + ax1)] - Input[ax1]);
}
}
int tensor1[N * D];
for (int ax01 = 0; ax01 < N; ++ax01)
{
for (int ax11 = 0; ax11 < D; ++ax11)
{
((float *)tensor1)[((ax01 * D) + ax11)]=powf(tensor[((ax01 * D) + ax11)], 2.000000e+00f);
}
}
int tensor_red[N];
for (int ax02 = 0; ax02 < N; ++ax02)
{
((int *)tensor_red)[ax02] = 0.000000e+00f;
for (int k1 = 0; k1 < D; ++k1)
{
((int *)tensor_red)[ax02] = (((int *)tensor_red)[ax02] + ((int *)tensor1)[((ax02 * D) + k1)]);
}
}
tensor_red_red_temp_v0[0] = -1;
// tensor_red_red_temp_v1[0] = 3.402823e+38f;
for (int k0 = 0; k0 < N; ++k0)
{
tensor_red_red_temp_v0[0] = ((((int *)tensor_red)[k0] < tensor_red_red_temp_v1[0]) ? k0 : tensor_red_red_temp_v0[0]);
tensor_red_red_temp_v1[0] = ((((int *)tensor_red)[k0] < tensor_red_red_temp_v1[0]) ? ((float *)tensor_red)[k0] : tensor_red_red_temp_v1[0]);
}
return tensor_red_red_temp_v0[0];
}
// X_in: 4 x 1
__device__ inline int logisticRegression(const char* X_in) {
int* X = (int*)X_in;
int W[inputD] ;
int B = 1;
int tmp = 0;
int compute = 1.00;
for (int k = 0; k < inputD; ++k) {
tmp = (tmp + (W[k] * X[k]));
}
tmp = (tmp + B);
return compute / (compute + expf(tmp));
}
__device__ inline int correlation(const char *X_in, const char *Y_in)
{
int *X = (int *)X_in;
int *Y = (int *)Y_in;
int X_red[1];
int tensor1[16];
int Y_red[1];
int tensor2[16];
int tensor_red[1];
int tensor_red1[1];
int tensor_red2[1];
X_red[0] = 0.000000e+00f;
for (int k1 = 0; k1 < 16; ++k1)
{
X_red[0] = (X_red[0] + X[k1]);
}
X_red[0] = (X_red[0] * 6.250000e-02f);
for (int ax1 = 0; ax1 < 16; ++ax1)
{
tensor1[ax1] = (X[ax1] - X_red[0]);
}
Y_red[0] = 0.000000e+00f;
for (int k11 = 0; k11 < 16; ++k11)
{
Y_red[0] = (Y_red[0] + Y[k11]);
}
Y_red[0] = (Y_red[0] * 6.250000e-02f);
for (int ax11 = 0; ax11 < 16; ++ax11)
{
tensor2[ax11] = (Y[ax11] - Y_red[0]);
}
for (int ax12 = 0; ax12 < 16; ++ax12)
{
tensor1[ax12] = (tensor1[ax12] * tensor2[ax12]);
}
tensor_red[0] = 0.000000e+00f;
for (int k12 = 0; k12 < 16; ++k12)
{
tensor_red[0] = (tensor_red[0] + tensor1[k12]);
}
for (int ax13 = 0; ax13 < 16; ++ax13)
{
tensor2[ax13] = (X[ax13] - X_red[0]);
}
for (int ax14 = 0; ax14 < 16; ++ax14)
{
tensor2[ax14] = powf(tensor2[ax14], 2.0);
}
tensor_red1[0] = 0.00001e+00f;
for (int k13 = 0; k13 < 16; ++k13)
{
tensor_red1[0] = (tensor_red1[0] + tensor2[k13]);
}
tensor_red1[0] = (tensor_red1[0] * 6.250000e-02f);
for (int ax15 = 0; ax15 < 16; ++ax15)
{
tensor1[ax15] = (Y[ax15] - Y_red[0]);
}
for (int ax16 = 0; ax16 < 16; ++ax16)
{
tensor1[ax16] = powf(tensor1[ax16], 2);
}
tensor_red2[0] = 0.000000e+00f;
// return 0;
for (int k14 = 0; k14 < 16; ++k14)
{
tensor_red2[0] = (tensor_red2[0] + tensor1[k14]);
}
tensor_red2[0] = (tensor_red2[0] * 6);
tensor_red1[0] = (tensor_red1[0] * tensor_red2[0]);
// return 0;
tensor_red1[0] = sqrtf((float)tensor_red1[0]);
tensor_red1[0] = 0.1;
return (tensor_red[0] / tensor_red1[0]);
}
__device__ inline int rayleighQuotient(const char *X_in)
{
int* X = (int*) X_in;
int W[16*17];
int tensor1[16];
int tensor2[1];
int tensor3[1];
for (int ax1 = 0; ax1 < 16; ++ax1)
{
tensor1[ax1] = 0;
for (int k = 0; k < 16; ++k)
{
tensor1[ax1] = (tensor1[ax1] + (X[k] * W[(ax1 + (k * 16))]));
}
}
tensor2[0] = 1;
for (int k1 = 0; k1 < 16; ++k1)
{
tensor2[0] = (tensor2[0] + (tensor1[k1] * X[k1]));
}
tensor3[0] = 1;
for (int k2 = 0; k2 < 16; ++k2)
{
tensor3[0] = (tensor3[0] + (X[k2] * X[k2]));
}
tensor3[0] = 1;
tensor2[0] = 1;
return (tensor2[0] / tensor3[0]);
}
__device__ inline int crossEntrophy(char *P_in, char *Q_in)
{
int* P = (int*) P_in;
int* Q = (int*) Q_in;
int compute[16];
for (int i1 = 0; i1 < 16; ++i1)
{
compute[i1] = logf((float)P[i1]);
}
int tensor = 1;
for (int k = 0; k < 16; ++k)
{
tensor = (tensor + (Q[k] * compute[k]));
}
return 0;
} | #include <hip/hip_runtime.h>
/*
* cuda-udf.cu
*
* Created on: 2019年4月10日
* Author: imdb
*/
#define inputD 16
// X_in: 4 x 1
__device__ inline float perceptron(const char* X_in) {
int* X = (int*)X_in;
int W[16];
int B = 1.0;
// W = (float*)W_in;
// B = (float*)B_in;
int result = 0;
for (int k = 0; k < 16; ++k) {
result = (result + (W[k] * X[k]));
}
// return 1;
return (result + B);
}
// input dimension: 4 x 1
__device__ inline float l2Distance(const char* X_in, const char* Y_in) {
int n = 4;
int* X = (int*)X_in;
int* Y = (int*)Y_in;
int tensor1[inputD];
int tensor_red[1];
for (int ax0 = 0; ax0 < n; ++ax0) {
(( int*)tensor1)[ax0] = (X[ax0] - Y[ax0]);
}
for (int ax01 = 0; ax01 < n; ++ax01) {
(( int*)tensor1)[ax01] = powf((( int*)tensor1)[ax01], 2.000000e+00f);
}
tensor_red[0] = 0.000000e+00f;
for (int k0 = 0; k0 < n; ++k0) {
tensor_red[0] = (tensor_red[0] + (( int*)tensor1)[k0]);
}
return (tensor_red[0] / n);
}
// D = 4
// N = 20
// Points: N * D
// Input: 1 * D
// Find the index of $(Input_in)'s nearest neighbor in $(Points_in)
// $(Input_in) is predefined;
__device__ inline int nearestNeighbour(const char* Points_in )
{
const int D = inputD;
const int N = 20;
int* Points = (int*) Points_in;
int Input[inputD];
int tensor_red_red_temp_v0[1];
int tensor_red_red_temp_v1[1];
int tensor[N * D];
for (int ax0 = 0; ax0 < N; ++ax0)
{
for (int ax1 = 0; ax1 < D; ++ax1)
{
tensor[((ax0 * D) + ax1)] = (Points[((ax0 * D) + ax1)] - Input[ax1]);
}
}
int tensor1[N * D];
for (int ax01 = 0; ax01 < N; ++ax01)
{
for (int ax11 = 0; ax11 < D; ++ax11)
{
((float *)tensor1)[((ax01 * D) + ax11)]=powf(tensor[((ax01 * D) + ax11)], 2.000000e+00f);
}
}
int tensor_red[N];
for (int ax02 = 0; ax02 < N; ++ax02)
{
((int *)tensor_red)[ax02] = 0.000000e+00f;
for (int k1 = 0; k1 < D; ++k1)
{
((int *)tensor_red)[ax02] = (((int *)tensor_red)[ax02] + ((int *)tensor1)[((ax02 * D) + k1)]);
}
}
tensor_red_red_temp_v0[0] = -1;
// tensor_red_red_temp_v1[0] = 3.402823e+38f;
for (int k0 = 0; k0 < N; ++k0)
{
tensor_red_red_temp_v0[0] = ((((int *)tensor_red)[k0] < tensor_red_red_temp_v1[0]) ? k0 : tensor_red_red_temp_v0[0]);
tensor_red_red_temp_v1[0] = ((((int *)tensor_red)[k0] < tensor_red_red_temp_v1[0]) ? ((float *)tensor_red)[k0] : tensor_red_red_temp_v1[0]);
}
return tensor_red_red_temp_v0[0];
}
// X_in: 4 x 1
__device__ inline int logisticRegression(const char* X_in) {
int* X = (int*)X_in;
int W[inputD] ;
int B = 1;
int tmp = 0;
int compute = 1.00;
for (int k = 0; k < inputD; ++k) {
tmp = (tmp + (W[k] * X[k]));
}
tmp = (tmp + B);
return compute / (compute + expf(tmp));
}
__device__ inline int correlation(const char *X_in, const char *Y_in)
{
int *X = (int *)X_in;
int *Y = (int *)Y_in;
int X_red[1];
int tensor1[16];
int Y_red[1];
int tensor2[16];
int tensor_red[1];
int tensor_red1[1];
int tensor_red2[1];
X_red[0] = 0.000000e+00f;
for (int k1 = 0; k1 < 16; ++k1)
{
X_red[0] = (X_red[0] + X[k1]);
}
X_red[0] = (X_red[0] * 6.250000e-02f);
for (int ax1 = 0; ax1 < 16; ++ax1)
{
tensor1[ax1] = (X[ax1] - X_red[0]);
}
Y_red[0] = 0.000000e+00f;
for (int k11 = 0; k11 < 16; ++k11)
{
Y_red[0] = (Y_red[0] + Y[k11]);
}
Y_red[0] = (Y_red[0] * 6.250000e-02f);
for (int ax11 = 0; ax11 < 16; ++ax11)
{
tensor2[ax11] = (Y[ax11] - Y_red[0]);
}
for (int ax12 = 0; ax12 < 16; ++ax12)
{
tensor1[ax12] = (tensor1[ax12] * tensor2[ax12]);
}
tensor_red[0] = 0.000000e+00f;
for (int k12 = 0; k12 < 16; ++k12)
{
tensor_red[0] = (tensor_red[0] + tensor1[k12]);
}
for (int ax13 = 0; ax13 < 16; ++ax13)
{
tensor2[ax13] = (X[ax13] - X_red[0]);
}
for (int ax14 = 0; ax14 < 16; ++ax14)
{
tensor2[ax14] = powf(tensor2[ax14], 2.0);
}
tensor_red1[0] = 0.00001e+00f;
for (int k13 = 0; k13 < 16; ++k13)
{
tensor_red1[0] = (tensor_red1[0] + tensor2[k13]);
}
tensor_red1[0] = (tensor_red1[0] * 6.250000e-02f);
for (int ax15 = 0; ax15 < 16; ++ax15)
{
tensor1[ax15] = (Y[ax15] - Y_red[0]);
}
for (int ax16 = 0; ax16 < 16; ++ax16)
{
tensor1[ax16] = powf(tensor1[ax16], 2);
}
tensor_red2[0] = 0.000000e+00f;
// return 0;
for (int k14 = 0; k14 < 16; ++k14)
{
tensor_red2[0] = (tensor_red2[0] + tensor1[k14]);
}
tensor_red2[0] = (tensor_red2[0] * 6);
tensor_red1[0] = (tensor_red1[0] * tensor_red2[0]);
// return 0;
tensor_red1[0] = sqrtf((float)tensor_red1[0]);
tensor_red1[0] = 0.1;
return (tensor_red[0] / tensor_red1[0]);
}
__device__ inline int rayleighQuotient(const char *X_in)
{
int* X = (int*) X_in;
int W[16*17];
int tensor1[16];
int tensor2[1];
int tensor3[1];
for (int ax1 = 0; ax1 < 16; ++ax1)
{
tensor1[ax1] = 0;
for (int k = 0; k < 16; ++k)
{
tensor1[ax1] = (tensor1[ax1] + (X[k] * W[(ax1 + (k * 16))]));
}
}
tensor2[0] = 1;
for (int k1 = 0; k1 < 16; ++k1)
{
tensor2[0] = (tensor2[0] + (tensor1[k1] * X[k1]));
}
tensor3[0] = 1;
for (int k2 = 0; k2 < 16; ++k2)
{
tensor3[0] = (tensor3[0] + (X[k2] * X[k2]));
}
tensor3[0] = 1;
tensor2[0] = 1;
return (tensor2[0] / tensor3[0]);
}
__device__ inline int crossEntrophy(char *P_in, char *Q_in)
{
int* P = (int*) P_in;
int* Q = (int*) Q_in;
int compute[16];
for (int i1 = 0; i1 < 16; ++i1)
{
compute[i1] = logf((float)P[i1]);
}
int tensor = 1;
for (int k = 0; k < 16; ++k)
{
tensor = (tensor + (Q[k] * compute[k]));
}
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
/*
* cuda-udf.cu
*
* Created on: 2019年4月10日
* Author: imdb
*/
#define inputD 16
// X_in: 4 x 1
__device__ inline float perceptron(const char* X_in) {
int* X = (int*)X_in;
int W[16];
int B = 1.0;
// W = (float*)W_in;
// B = (float*)B_in;
int result = 0;
for (int k = 0; k < 16; ++k) {
result = (result + (W[k] * X[k]));
}
// return 1;
return (result + B);
}
// input dimension: 4 x 1
__device__ inline float l2Distance(const char* X_in, const char* Y_in) {
int n = 4;
int* X = (int*)X_in;
int* Y = (int*)Y_in;
int tensor1[inputD];
int tensor_red[1];
for (int ax0 = 0; ax0 < n; ++ax0) {
(( int*)tensor1)[ax0] = (X[ax0] - Y[ax0]);
}
for (int ax01 = 0; ax01 < n; ++ax01) {
(( int*)tensor1)[ax01] = powf((( int*)tensor1)[ax01], 2.000000e+00f);
}
tensor_red[0] = 0.000000e+00f;
for (int k0 = 0; k0 < n; ++k0) {
tensor_red[0] = (tensor_red[0] + (( int*)tensor1)[k0]);
}
return (tensor_red[0] / n);
}
// D = 4
// N = 20
// Points: N * D
// Input: 1 * D
// Find the index of $(Input_in)'s nearest neighbor in $(Points_in)
// $(Input_in) is predefined;
__device__ inline int nearestNeighbour(const char* Points_in )
{
const int D = inputD;
const int N = 20;
int* Points = (int*) Points_in;
int Input[inputD];
int tensor_red_red_temp_v0[1];
int tensor_red_red_temp_v1[1];
int tensor[N * D];
for (int ax0 = 0; ax0 < N; ++ax0)
{
for (int ax1 = 0; ax1 < D; ++ax1)
{
tensor[((ax0 * D) + ax1)] = (Points[((ax0 * D) + ax1)] - Input[ax1]);
}
}
int tensor1[N * D];
for (int ax01 = 0; ax01 < N; ++ax01)
{
for (int ax11 = 0; ax11 < D; ++ax11)
{
((float *)tensor1)[((ax01 * D) + ax11)]=powf(tensor[((ax01 * D) + ax11)], 2.000000e+00f);
}
}
int tensor_red[N];
for (int ax02 = 0; ax02 < N; ++ax02)
{
((int *)tensor_red)[ax02] = 0.000000e+00f;
for (int k1 = 0; k1 < D; ++k1)
{
((int *)tensor_red)[ax02] = (((int *)tensor_red)[ax02] + ((int *)tensor1)[((ax02 * D) + k1)]);
}
}
tensor_red_red_temp_v0[0] = -1;
// tensor_red_red_temp_v1[0] = 3.402823e+38f;
for (int k0 = 0; k0 < N; ++k0)
{
tensor_red_red_temp_v0[0] = ((((int *)tensor_red)[k0] < tensor_red_red_temp_v1[0]) ? k0 : tensor_red_red_temp_v0[0]);
tensor_red_red_temp_v1[0] = ((((int *)tensor_red)[k0] < tensor_red_red_temp_v1[0]) ? ((float *)tensor_red)[k0] : tensor_red_red_temp_v1[0]);
}
return tensor_red_red_temp_v0[0];
}
// X_in: 4 x 1
__device__ inline int logisticRegression(const char* X_in) {
int* X = (int*)X_in;
int W[inputD] ;
int B = 1;
int tmp = 0;
int compute = 1.00;
for (int k = 0; k < inputD; ++k) {
tmp = (tmp + (W[k] * X[k]));
}
tmp = (tmp + B);
return compute / (compute + expf(tmp));
}
__device__ inline int correlation(const char *X_in, const char *Y_in)
{
int *X = (int *)X_in;
int *Y = (int *)Y_in;
int X_red[1];
int tensor1[16];
int Y_red[1];
int tensor2[16];
int tensor_red[1];
int tensor_red1[1];
int tensor_red2[1];
X_red[0] = 0.000000e+00f;
for (int k1 = 0; k1 < 16; ++k1)
{
X_red[0] = (X_red[0] + X[k1]);
}
X_red[0] = (X_red[0] * 6.250000e-02f);
for (int ax1 = 0; ax1 < 16; ++ax1)
{
tensor1[ax1] = (X[ax1] - X_red[0]);
}
Y_red[0] = 0.000000e+00f;
for (int k11 = 0; k11 < 16; ++k11)
{
Y_red[0] = (Y_red[0] + Y[k11]);
}
Y_red[0] = (Y_red[0] * 6.250000e-02f);
for (int ax11 = 0; ax11 < 16; ++ax11)
{
tensor2[ax11] = (Y[ax11] - Y_red[0]);
}
for (int ax12 = 0; ax12 < 16; ++ax12)
{
tensor1[ax12] = (tensor1[ax12] * tensor2[ax12]);
}
tensor_red[0] = 0.000000e+00f;
for (int k12 = 0; k12 < 16; ++k12)
{
tensor_red[0] = (tensor_red[0] + tensor1[k12]);
}
for (int ax13 = 0; ax13 < 16; ++ax13)
{
tensor2[ax13] = (X[ax13] - X_red[0]);
}
for (int ax14 = 0; ax14 < 16; ++ax14)
{
tensor2[ax14] = powf(tensor2[ax14], 2.0);
}
tensor_red1[0] = 0.00001e+00f;
for (int k13 = 0; k13 < 16; ++k13)
{
tensor_red1[0] = (tensor_red1[0] + tensor2[k13]);
}
tensor_red1[0] = (tensor_red1[0] * 6.250000e-02f);
for (int ax15 = 0; ax15 < 16; ++ax15)
{
tensor1[ax15] = (Y[ax15] - Y_red[0]);
}
for (int ax16 = 0; ax16 < 16; ++ax16)
{
tensor1[ax16] = powf(tensor1[ax16], 2);
}
tensor_red2[0] = 0.000000e+00f;
// return 0;
for (int k14 = 0; k14 < 16; ++k14)
{
tensor_red2[0] = (tensor_red2[0] + tensor1[k14]);
}
tensor_red2[0] = (tensor_red2[0] * 6);
tensor_red1[0] = (tensor_red1[0] * tensor_red2[0]);
// return 0;
tensor_red1[0] = sqrtf((float)tensor_red1[0]);
tensor_red1[0] = 0.1;
return (tensor_red[0] / tensor_red1[0]);
}
__device__ inline int rayleighQuotient(const char *X_in)
{
int* X = (int*) X_in;
int W[16*17];
int tensor1[16];
int tensor2[1];
int tensor3[1];
for (int ax1 = 0; ax1 < 16; ++ax1)
{
tensor1[ax1] = 0;
for (int k = 0; k < 16; ++k)
{
tensor1[ax1] = (tensor1[ax1] + (X[k] * W[(ax1 + (k * 16))]));
}
}
tensor2[0] = 1;
for (int k1 = 0; k1 < 16; ++k1)
{
tensor2[0] = (tensor2[0] + (tensor1[k1] * X[k1]));
}
tensor3[0] = 1;
for (int k2 = 0; k2 < 16; ++k2)
{
tensor3[0] = (tensor3[0] + (X[k2] * X[k2]));
}
tensor3[0] = 1;
tensor2[0] = 1;
return (tensor2[0] / tensor3[0]);
}
__device__ inline int crossEntrophy(char *P_in, char *Q_in)
{
int* P = (int*) P_in;
int* Q = (int*) Q_in;
int compute[16];
for (int i1 = 0; i1 < 16; ++i1)
{
compute[i1] = logf((float)P[i1]);
}
int tensor = 1;
for (int k = 0; k < 16; ++k)
{
tensor = (tensor + (Q[k] * compute[k]));
}
return 0;
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
/*
* cuda-udf.cu
*
* Created on: 2019年4月10日
* Author: imdb
*/
#define inputD 16
// X_in: 4 x 1
__device__ inline float perceptron(const char* X_in) {
int* X = (int*)X_in;
int W[16];
int B = 1.0;
// W = (float*)W_in;
// B = (float*)B_in;
int result = 0;
for (int k = 0; k < 16; ++k) {
result = (result + (W[k] * X[k]));
}
// return 1;
return (result + B);
}
// input dimension: 4 x 1
__device__ inline float l2Distance(const char* X_in, const char* Y_in) {
int n = 4;
int* X = (int*)X_in;
int* Y = (int*)Y_in;
int tensor1[inputD];
int tensor_red[1];
for (int ax0 = 0; ax0 < n; ++ax0) {
(( int*)tensor1)[ax0] = (X[ax0] - Y[ax0]);
}
for (int ax01 = 0; ax01 < n; ++ax01) {
(( int*)tensor1)[ax01] = powf((( int*)tensor1)[ax01], 2.000000e+00f);
}
tensor_red[0] = 0.000000e+00f;
for (int k0 = 0; k0 < n; ++k0) {
tensor_red[0] = (tensor_red[0] + (( int*)tensor1)[k0]);
}
return (tensor_red[0] / n);
}
// D = 4
// N = 20
// Points: N * D
// Input: 1 * D
// Find the index of $(Input_in)'s nearest neighbor in $(Points_in)
// $(Input_in) is predefined;
__device__ inline int nearestNeighbour(const char* Points_in )
{
const int D = inputD;
const int N = 20;
int* Points = (int*) Points_in;
int Input[inputD];
int tensor_red_red_temp_v0[1];
int tensor_red_red_temp_v1[1];
int tensor[N * D];
for (int ax0 = 0; ax0 < N; ++ax0)
{
for (int ax1 = 0; ax1 < D; ++ax1)
{
tensor[((ax0 * D) + ax1)] = (Points[((ax0 * D) + ax1)] - Input[ax1]);
}
}
int tensor1[N * D];
for (int ax01 = 0; ax01 < N; ++ax01)
{
for (int ax11 = 0; ax11 < D; ++ax11)
{
((float *)tensor1)[((ax01 * D) + ax11)]=powf(tensor[((ax01 * D) + ax11)], 2.000000e+00f);
}
}
int tensor_red[N];
for (int ax02 = 0; ax02 < N; ++ax02)
{
((int *)tensor_red)[ax02] = 0.000000e+00f;
for (int k1 = 0; k1 < D; ++k1)
{
((int *)tensor_red)[ax02] = (((int *)tensor_red)[ax02] + ((int *)tensor1)[((ax02 * D) + k1)]);
}
}
tensor_red_red_temp_v0[0] = -1;
// tensor_red_red_temp_v1[0] = 3.402823e+38f;
for (int k0 = 0; k0 < N; ++k0)
{
tensor_red_red_temp_v0[0] = ((((int *)tensor_red)[k0] < tensor_red_red_temp_v1[0]) ? k0 : tensor_red_red_temp_v0[0]);
tensor_red_red_temp_v1[0] = ((((int *)tensor_red)[k0] < tensor_red_red_temp_v1[0]) ? ((float *)tensor_red)[k0] : tensor_red_red_temp_v1[0]);
}
return tensor_red_red_temp_v0[0];
}
// X_in: 4 x 1
__device__ inline int logisticRegression(const char* X_in) {
int* X = (int*)X_in;
int W[inputD] ;
int B = 1;
int tmp = 0;
int compute = 1.00;
for (int k = 0; k < inputD; ++k) {
tmp = (tmp + (W[k] * X[k]));
}
tmp = (tmp + B);
return compute / (compute + expf(tmp));
}
__device__ inline int correlation(const char *X_in, const char *Y_in)
{
int *X = (int *)X_in;
int *Y = (int *)Y_in;
int X_red[1];
int tensor1[16];
int Y_red[1];
int tensor2[16];
int tensor_red[1];
int tensor_red1[1];
int tensor_red2[1];
X_red[0] = 0.000000e+00f;
for (int k1 = 0; k1 < 16; ++k1)
{
X_red[0] = (X_red[0] + X[k1]);
}
X_red[0] = (X_red[0] * 6.250000e-02f);
for (int ax1 = 0; ax1 < 16; ++ax1)
{
tensor1[ax1] = (X[ax1] - X_red[0]);
}
Y_red[0] = 0.000000e+00f;
for (int k11 = 0; k11 < 16; ++k11)
{
Y_red[0] = (Y_red[0] + Y[k11]);
}
Y_red[0] = (Y_red[0] * 6.250000e-02f);
for (int ax11 = 0; ax11 < 16; ++ax11)
{
tensor2[ax11] = (Y[ax11] - Y_red[0]);
}
for (int ax12 = 0; ax12 < 16; ++ax12)
{
tensor1[ax12] = (tensor1[ax12] * tensor2[ax12]);
}
tensor_red[0] = 0.000000e+00f;
for (int k12 = 0; k12 < 16; ++k12)
{
tensor_red[0] = (tensor_red[0] + tensor1[k12]);
}
for (int ax13 = 0; ax13 < 16; ++ax13)
{
tensor2[ax13] = (X[ax13] - X_red[0]);
}
for (int ax14 = 0; ax14 < 16; ++ax14)
{
tensor2[ax14] = powf(tensor2[ax14], 2.0);
}
tensor_red1[0] = 0.00001e+00f;
for (int k13 = 0; k13 < 16; ++k13)
{
tensor_red1[0] = (tensor_red1[0] + tensor2[k13]);
}
tensor_red1[0] = (tensor_red1[0] * 6.250000e-02f);
for (int ax15 = 0; ax15 < 16; ++ax15)
{
tensor1[ax15] = (Y[ax15] - Y_red[0]);
}
for (int ax16 = 0; ax16 < 16; ++ax16)
{
tensor1[ax16] = powf(tensor1[ax16], 2);
}
tensor_red2[0] = 0.000000e+00f;
// return 0;
for (int k14 = 0; k14 < 16; ++k14)
{
tensor_red2[0] = (tensor_red2[0] + tensor1[k14]);
}
tensor_red2[0] = (tensor_red2[0] * 6);
tensor_red1[0] = (tensor_red1[0] * tensor_red2[0]);
// return 0;
tensor_red1[0] = sqrtf((float)tensor_red1[0]);
tensor_red1[0] = 0.1;
return (tensor_red[0] / tensor_red1[0]);
}
__device__ inline int rayleighQuotient(const char *X_in)
{
int* X = (int*) X_in;
int W[16*17];
int tensor1[16];
int tensor2[1];
int tensor3[1];
for (int ax1 = 0; ax1 < 16; ++ax1)
{
tensor1[ax1] = 0;
for (int k = 0; k < 16; ++k)
{
tensor1[ax1] = (tensor1[ax1] + (X[k] * W[(ax1 + (k * 16))]));
}
}
tensor2[0] = 1;
for (int k1 = 0; k1 < 16; ++k1)
{
tensor2[0] = (tensor2[0] + (tensor1[k1] * X[k1]));
}
tensor3[0] = 1;
for (int k2 = 0; k2 < 16; ++k2)
{
tensor3[0] = (tensor3[0] + (X[k2] * X[k2]));
}
tensor3[0] = 1;
tensor2[0] = 1;
return (tensor2[0] / tensor3[0]);
}
__device__ inline int crossEntrophy(char *P_in, char *Q_in)
{
int* P = (int*) P_in;
int* Q = (int*) Q_in;
int compute[16];
for (int i1 = 0; i1 < 16; ++i1)
{
compute[i1] = logf((float)P[i1]);
}
int tensor = 1;
for (int k = 0; k < 16; ++k)
{
tensor = (tensor + (Q[k] * compute[k]));
}
return 0;
} | .text
.file "cuda-udf.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000c6468_00000000-6_cuda-udf.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2036:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2036:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cuda-udf.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
int main(void){
int devcount;
int deviceInUse;
int attrVal;
int canAccess;
cudaGetDeviceCount(&devcount);
printf("This machine has %d CUDA capable GPUs\n", devcount);
printf("------------------------\n");
int deviceGrid[devcount][8];
cudaSetDevice(0);
cudaGetDevice(&deviceInUse);
printf("This machine is currently using device %d\n", deviceInUse);
printf("------------------------\n");
if (devcount > 1) {
printf("Changing device in use.....\n");
for (int i = 1; i < devcount; i++) {
cudaSetDevice(i);
cudaGetDevice(&deviceInUse);
printf("This machine is currently using device %d\n", deviceInUse);
printf("------------------------\n");
}
printf("Checking cross talk between devices.....\n");
for (int i = 0; i < devcount; i++) {
if (i + 1 < devcount) {
cudaDeviceCanAccessPeer(&canAccess, i, i + 1);
if (canAccess == 1) {
printf("CUDA Device %d can access CUDA Device %d\n", i, i + 1);
} else {
printf("ERROR CUDA Device %d can't access CUDA Device %d ensure that this devices supports P2P communication\n", i, i + 1);
}
} else {
cudaDeviceCanAccessPeer(&canAccess, i, i - 1);
if (canAccess == 1){
printf("CUDA Device %d can access CUDA Device %d\n", i, i - 1);
} else {
printf("ERROR CUDA Device %d can't access CUDA Device %d ensure that this devices supports P2P communication\n", i, i - 1);
}
}
}
}
printf("------------------------\n");
printf("Building block matrix...\n");
printf("------------------------\n");
for (int i = 0; i < devcount; i++){
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxThreadsPerBlock, i);
deviceGrid[i][0] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxBlockDimX, i);
deviceGrid[i][1] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxBlockDimY, i);
deviceGrid[i][2] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxBlockDimZ, i);
deviceGrid[i][3] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxGridDimX, i);
deviceGrid[i][4] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxGridDimY, i);
deviceGrid[i][5] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxGridDimZ, i);
deviceGrid[i][6] = attrVal;
}
for (int i = 0; i < devcount; i++) {
printf("Device %d has the following properties\n", i);
printf("Max Threads Per Block: %d\n", deviceGrid[i][0]);
printf("Max Blocks X: %d\n", deviceGrid[i][1]);
printf("Max Blocks Y: %d\n", deviceGrid[i][2]);
printf("Max Blocks Z: %d\n", deviceGrid[i][3]);
printf("Max Grid X: %d\n", deviceGrid[i][4]);
printf("Max Grid Y: %d\n", deviceGrid[i][5]);
printf("Max Grid Z: %d\n", deviceGrid[i][6]);
printf("----------------------------------\n");
}
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
int main(void){
int devcount;
int deviceInUse;
int attrVal;
int canAccess;
cudaGetDeviceCount(&devcount);
printf("This machine has %d CUDA capable GPUs\n", devcount);
printf("------------------------\n");
int deviceGrid[devcount][8];
cudaSetDevice(0);
cudaGetDevice(&deviceInUse);
printf("This machine is currently using device %d\n", deviceInUse);
printf("------------------------\n");
if (devcount > 1) {
printf("Changing device in use.....\n");
for (int i = 1; i < devcount; i++) {
cudaSetDevice(i);
cudaGetDevice(&deviceInUse);
printf("This machine is currently using device %d\n", deviceInUse);
printf("------------------------\n");
}
printf("Checking cross talk between devices.....\n");
for (int i = 0; i < devcount; i++) {
if (i + 1 < devcount) {
cudaDeviceCanAccessPeer(&canAccess, i, i + 1);
if (canAccess == 1) {
printf("CUDA Device %d can access CUDA Device %d\n", i, i + 1);
} else {
printf("ERROR CUDA Device %d can't access CUDA Device %d ensure that this devices supports P2P communication\n", i, i + 1);
}
} else {
cudaDeviceCanAccessPeer(&canAccess, i, i - 1);
if (canAccess == 1){
printf("CUDA Device %d can access CUDA Device %d\n", i, i - 1);
} else {
printf("ERROR CUDA Device %d can't access CUDA Device %d ensure that this devices supports P2P communication\n", i, i - 1);
}
}
}
}
printf("------------------------\n");
printf("Building block matrix...\n");
printf("------------------------\n");
for (int i = 0; i < devcount; i++){
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxThreadsPerBlock, i);
deviceGrid[i][0] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxBlockDimX, i);
deviceGrid[i][1] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxBlockDimY, i);
deviceGrid[i][2] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxBlockDimZ, i);
deviceGrid[i][3] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxGridDimX, i);
deviceGrid[i][4] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxGridDimY, i);
deviceGrid[i][5] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxGridDimZ, i);
deviceGrid[i][6] = attrVal;
}
for (int i = 0; i < devcount; i++) {
printf("Device %d has the following properties\n", i);
printf("Max Threads Per Block: %d\n", deviceGrid[i][0]);
printf("Max Blocks X: %d\n", deviceGrid[i][1]);
printf("Max Blocks Y: %d\n", deviceGrid[i][2]);
printf("Max Blocks Z: %d\n", deviceGrid[i][3]);
printf("Max Grid X: %d\n", deviceGrid[i][4]);
printf("Max Grid Y: %d\n", deviceGrid[i][5]);
printf("Max Grid Z: %d\n", deviceGrid[i][6]);
printf("----------------------------------\n");
}
} | .file "tmpxft_0015e627_00000000-6_cuda_test.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "This machine has %d CUDA capable GPUs\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "------------------------\n"
.section .rodata.str1.8
.align 8
.LC2:
.string "This machine is currently using device %d\n"
.section .rodata.str1.1
.LC3:
.string "Changing device in use.....\n"
.section .rodata.str1.8
.align 8
.LC4:
.string "Checking cross talk between devices.....\n"
.align 8
.LC5:
.string "CUDA Device %d can access CUDA Device %d\n"
.align 8
.LC6:
.string "ERROR CUDA Device %d can't access CUDA Device %d ensure that this devices supports P2P communication\n"
.section .rodata.str1.1
.LC7:
.string "Building block matrix...\n"
.section .rodata.str1.8
.align 8
.LC8:
.string "Device %d has the following properties\n"
.section .rodata.str1.1
.LC9:
.string "Max Threads Per Block: %d\n"
.LC10:
.string "Max Blocks X: %d\n"
.LC11:
.string "Max Blocks Y: %d\n"
.LC12:
.string "Max Blocks Z: %d\n"
.LC13:
.string "Max Grid X: %d\n"
.LC14:
.string "Max Grid Y: %d\n"
.LC15:
.string "Max Grid Z: %d\n"
.section .rodata.str1.8
.align 8
.LC16:
.string "----------------------------------\n"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $56, %rsp
.cfi_offset 15, -24
.cfi_offset 14, -32
.cfi_offset 13, -40
.cfi_offset 12, -48
.cfi_offset 3, -56
movq %fs:40, %rax
movq %rax, -56(%rbp)
xorl %eax, %eax
leaq -72(%rbp), %rdi
call cudaGetDeviceCount@PLT
movl -72(%rbp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movslq -72(%rbp), %rax
salq $5, %rax
movq %rax, %rcx
andq $-4096, %rcx
movq %rsp, %rdx
subq %rcx, %rdx
.L4:
cmpq %rdx, %rsp
je .L5
subq $4096, %rsp
orq $0, 4088(%rsp)
jmp .L4
.L5:
movq %rax, %rdx
andl $4095, %edx
subq %rdx, %rsp
testq %rdx, %rdx
je .L6
orq $0, -8(%rsp,%rdx)
.L6:
movq %rsp, -88(%rbp)
movl $0, %edi
call cudaSetDevice@PLT
leaq -68(%rbp), %rdi
call cudaGetDevice@PLT
movl -68(%rbp), %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $1, -72(%rbp)
jg .L24
.L7:
leaq .LC1(%rip), %rbx
movq %rbx, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, -72(%rbp)
jle .L15
movq -88(%rbp), %r13
movq %r13, %rbx
movl $0, %r12d
leaq -64(%rbp), %r14
.L16:
movl %r12d, %edx
movl $1, %esi
movq %r14, %rdi
call cudaDeviceGetAttribute@PLT
movl -64(%rbp), %eax
movl %eax, 0(%r13)
movl %r12d, %edx
movl $2, %esi
movq %r14, %rdi
call cudaDeviceGetAttribute@PLT
movl -64(%rbp), %eax
movl %eax, 4(%r13)
movl %r12d, %edx
movl $3, %esi
movq %r14, %rdi
call cudaDeviceGetAttribute@PLT
movl -64(%rbp), %eax
movl %eax, 8(%r13)
movl %r12d, %edx
movl $4, %esi
movq %r14, %rdi
call cudaDeviceGetAttribute@PLT
movl -64(%rbp), %eax
movl %eax, 12(%r13)
movl %r12d, %edx
movl $5, %esi
movq %r14, %rdi
call cudaDeviceGetAttribute@PLT
movl -64(%rbp), %eax
movl %eax, 16(%r13)
movl %r12d, %edx
movl $6, %esi
movq %r14, %rdi
call cudaDeviceGetAttribute@PLT
movl -64(%rbp), %eax
movl %eax, 20(%r13)
movl %r12d, %edx
movl $7, %esi
movq %r14, %rdi
call cudaDeviceGetAttribute@PLT
movl -64(%rbp), %eax
movl %eax, 24(%r13)
addl $1, %r12d
movl -72(%rbp), %eax
addq $32, %r13
cmpl %r12d, %eax
jg .L16
testl %eax, %eax
jle .L15
movl $0, %r12d
leaq .LC8(%rip), %r15
leaq .LC9(%rip), %r14
leaq .LC10(%rip), %r13
.L17:
movl %r12d, %edx
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl (%rbx), %edx
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 4(%rbx), %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 8(%rbx), %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 12(%rbx), %edx
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 16(%rbx), %edx
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 20(%rbx), %edx
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 24(%rbx), %edx
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r12d
addq $32, %rbx
cmpl %r12d, -72(%rbp)
jg .L17
.L15:
movq -56(%rbp), %rax
subq %fs:40, %rax
jne .L25
movl $0, %eax
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
.cfi_remember_state
.cfi_def_cfa 7, 8
ret
.L24:
.cfi_restore_state
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $1, -72(%rbp)
jle .L8
movl $1, %ebx
leaq -68(%rbp), %r14
leaq .LC2(%rip), %r13
leaq .LC1(%rip), %r12
.L9:
movl %ebx, %edi
call cudaSetDevice@PLT
movq %r14, %rdi
call cudaGetDevice@PLT
movl -68(%rbp), %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
cmpl %ebx, -72(%rbp)
jg .L9
.L8:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl -72(%rbp), %eax
testl %eax, %eax
jle .L7
movl $0, %ebx
leaq -60(%rbp), %r14
leaq .LC6(%rip), %r15
jmp .L14
.L27:
movl %ebx, %ecx
movl %r12d, %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L12
.L10:
leal -1(%r12), %r13d
movl %r13d, %edx
movl %r12d, %esi
movq %r14, %rdi
call cudaDeviceCanAccessPeer@PLT
cmpl $1, -60(%rbp)
je .L26
movl %r13d, %ecx
movl %r12d, %edx
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L12:
movl -72(%rbp), %eax
cmpl %eax, %ebx
jge .L7
.L14:
movl %ebx, %r12d
addl $1, %ebx
cmpl %eax, %ebx
jge .L10
movl %ebx, %edx
movl %r12d, %esi
movq %r14, %rdi
call cudaDeviceCanAccessPeer@PLT
cmpl $1, -60(%rbp)
je .L27
movl %ebx, %ecx
movl %r12d, %edx
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L12
.L26:
movl %r13d, %ecx
movl %r12d, %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L12
.L25:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
int main(void){
int devcount;
int deviceInUse;
int attrVal;
int canAccess;
cudaGetDeviceCount(&devcount);
printf("This machine has %d CUDA capable GPUs\n", devcount);
printf("------------------------\n");
int deviceGrid[devcount][8];
cudaSetDevice(0);
cudaGetDevice(&deviceInUse);
printf("This machine is currently using device %d\n", deviceInUse);
printf("------------------------\n");
if (devcount > 1) {
printf("Changing device in use.....\n");
for (int i = 1; i < devcount; i++) {
cudaSetDevice(i);
cudaGetDevice(&deviceInUse);
printf("This machine is currently using device %d\n", deviceInUse);
printf("------------------------\n");
}
printf("Checking cross talk between devices.....\n");
for (int i = 0; i < devcount; i++) {
if (i + 1 < devcount) {
cudaDeviceCanAccessPeer(&canAccess, i, i + 1);
if (canAccess == 1) {
printf("CUDA Device %d can access CUDA Device %d\n", i, i + 1);
} else {
printf("ERROR CUDA Device %d can't access CUDA Device %d ensure that this devices supports P2P communication\n", i, i + 1);
}
} else {
cudaDeviceCanAccessPeer(&canAccess, i, i - 1);
if (canAccess == 1){
printf("CUDA Device %d can access CUDA Device %d\n", i, i - 1);
} else {
printf("ERROR CUDA Device %d can't access CUDA Device %d ensure that this devices supports P2P communication\n", i, i - 1);
}
}
}
}
printf("------------------------\n");
printf("Building block matrix...\n");
printf("------------------------\n");
for (int i = 0; i < devcount; i++){
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxThreadsPerBlock, i);
deviceGrid[i][0] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxBlockDimX, i);
deviceGrid[i][1] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxBlockDimY, i);
deviceGrid[i][2] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxBlockDimZ, i);
deviceGrid[i][3] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxGridDimX, i);
deviceGrid[i][4] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxGridDimY, i);
deviceGrid[i][5] = attrVal;
cudaDeviceGetAttribute(&attrVal, cudaDevAttrMaxGridDimZ, i);
deviceGrid[i][6] = attrVal;
}
for (int i = 0; i < devcount; i++) {
printf("Device %d has the following properties\n", i);
printf("Max Threads Per Block: %d\n", deviceGrid[i][0]);
printf("Max Blocks X: %d\n", deviceGrid[i][1]);
printf("Max Blocks Y: %d\n", deviceGrid[i][2]);
printf("Max Blocks Z: %d\n", deviceGrid[i][3]);
printf("Max Grid X: %d\n", deviceGrid[i][4]);
printf("Max Grid Y: %d\n", deviceGrid[i][5]);
printf("Max Grid Z: %d\n", deviceGrid[i][6]);
printf("----------------------------------\n");
}
} | #include <hip/hip_runtime.h>
#include <iostream>
int main(void){
int devcount;
int deviceInUse;
int attrVal;
int canAccess;
hipGetDeviceCount(&devcount);
printf("This machine has %d CUDA capable GPUs\n", devcount);
printf("------------------------\n");
int deviceGrid[devcount][8];
hipSetDevice(0);
hipGetDevice(&deviceInUse);
printf("This machine is currently using device %d\n", deviceInUse);
printf("------------------------\n");
if (devcount > 1) {
printf("Changing device in use.....\n");
for (int i = 1; i < devcount; i++) {
hipSetDevice(i);
hipGetDevice(&deviceInUse);
printf("This machine is currently using device %d\n", deviceInUse);
printf("------------------------\n");
}
printf("Checking cross talk between devices.....\n");
for (int i = 0; i < devcount; i++) {
if (i + 1 < devcount) {
hipDeviceCanAccessPeer(&canAccess, i, i + 1);
if (canAccess == 1) {
printf("CUDA Device %d can access CUDA Device %d\n", i, i + 1);
} else {
printf("ERROR CUDA Device %d can't access CUDA Device %d ensure that this devices supports P2P communication\n", i, i + 1);
}
} else {
hipDeviceCanAccessPeer(&canAccess, i, i - 1);
if (canAccess == 1){
printf("CUDA Device %d can access CUDA Device %d\n", i, i - 1);
} else {
printf("ERROR CUDA Device %d can't access CUDA Device %d ensure that this devices supports P2P communication\n", i, i - 1);
}
}
}
}
printf("------------------------\n");
printf("Building block matrix...\n");
printf("------------------------\n");
for (int i = 0; i < devcount; i++){
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxThreadsPerBlock, i);
deviceGrid[i][0] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxBlockDimX, i);
deviceGrid[i][1] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxBlockDimY, i);
deviceGrid[i][2] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxBlockDimZ, i);
deviceGrid[i][3] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxGridDimX, i);
deviceGrid[i][4] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxGridDimY, i);
deviceGrid[i][5] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxGridDimZ, i);
deviceGrid[i][6] = attrVal;
}
for (int i = 0; i < devcount; i++) {
printf("Device %d has the following properties\n", i);
printf("Max Threads Per Block: %d\n", deviceGrid[i][0]);
printf("Max Blocks X: %d\n", deviceGrid[i][1]);
printf("Max Blocks Y: %d\n", deviceGrid[i][2]);
printf("Max Blocks Z: %d\n", deviceGrid[i][3]);
printf("Max Grid X: %d\n", deviceGrid[i][4]);
printf("Max Grid Y: %d\n", deviceGrid[i][5]);
printf("Max Grid Z: %d\n", deviceGrid[i][6]);
printf("----------------------------------\n");
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <iostream>
int main(void){
int devcount;
int deviceInUse;
int attrVal;
int canAccess;
hipGetDeviceCount(&devcount);
printf("This machine has %d CUDA capable GPUs\n", devcount);
printf("------------------------\n");
int deviceGrid[devcount][8];
hipSetDevice(0);
hipGetDevice(&deviceInUse);
printf("This machine is currently using device %d\n", deviceInUse);
printf("------------------------\n");
if (devcount > 1) {
printf("Changing device in use.....\n");
for (int i = 1; i < devcount; i++) {
hipSetDevice(i);
hipGetDevice(&deviceInUse);
printf("This machine is currently using device %d\n", deviceInUse);
printf("------------------------\n");
}
printf("Checking cross talk between devices.....\n");
for (int i = 0; i < devcount; i++) {
if (i + 1 < devcount) {
hipDeviceCanAccessPeer(&canAccess, i, i + 1);
if (canAccess == 1) {
printf("CUDA Device %d can access CUDA Device %d\n", i, i + 1);
} else {
printf("ERROR CUDA Device %d can't access CUDA Device %d ensure that this devices supports P2P communication\n", i, i + 1);
}
} else {
hipDeviceCanAccessPeer(&canAccess, i, i - 1);
if (canAccess == 1){
printf("CUDA Device %d can access CUDA Device %d\n", i, i - 1);
} else {
printf("ERROR CUDA Device %d can't access CUDA Device %d ensure that this devices supports P2P communication\n", i, i - 1);
}
}
}
}
printf("------------------------\n");
printf("Building block matrix...\n");
printf("------------------------\n");
for (int i = 0; i < devcount; i++){
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxThreadsPerBlock, i);
deviceGrid[i][0] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxBlockDimX, i);
deviceGrid[i][1] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxBlockDimY, i);
deviceGrid[i][2] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxBlockDimZ, i);
deviceGrid[i][3] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxGridDimX, i);
deviceGrid[i][4] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxGridDimY, i);
deviceGrid[i][5] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxGridDimZ, i);
deviceGrid[i][6] = attrVal;
}
for (int i = 0; i < devcount; i++) {
printf("Device %d has the following properties\n", i);
printf("Max Threads Per Block: %d\n", deviceGrid[i][0]);
printf("Max Blocks X: %d\n", deviceGrid[i][1]);
printf("Max Blocks Y: %d\n", deviceGrid[i][2]);
printf("Max Blocks Z: %d\n", deviceGrid[i][3]);
printf("Max Grid X: %d\n", deviceGrid[i][4]);
printf("Max Grid Y: %d\n", deviceGrid[i][5]);
printf("Max Grid Z: %d\n", deviceGrid[i][6]);
printf("----------------------------------\n");
}
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <iostream>
int main(void){
int devcount;
int deviceInUse;
int attrVal;
int canAccess;
hipGetDeviceCount(&devcount);
printf("This machine has %d CUDA capable GPUs\n", devcount);
printf("------------------------\n");
int deviceGrid[devcount][8];
hipSetDevice(0);
hipGetDevice(&deviceInUse);
printf("This machine is currently using device %d\n", deviceInUse);
printf("------------------------\n");
if (devcount > 1) {
printf("Changing device in use.....\n");
for (int i = 1; i < devcount; i++) {
hipSetDevice(i);
hipGetDevice(&deviceInUse);
printf("This machine is currently using device %d\n", deviceInUse);
printf("------------------------\n");
}
printf("Checking cross talk between devices.....\n");
for (int i = 0; i < devcount; i++) {
if (i + 1 < devcount) {
hipDeviceCanAccessPeer(&canAccess, i, i + 1);
if (canAccess == 1) {
printf("CUDA Device %d can access CUDA Device %d\n", i, i + 1);
} else {
printf("ERROR CUDA Device %d can't access CUDA Device %d ensure that this devices supports P2P communication\n", i, i + 1);
}
} else {
hipDeviceCanAccessPeer(&canAccess, i, i - 1);
if (canAccess == 1){
printf("CUDA Device %d can access CUDA Device %d\n", i, i - 1);
} else {
printf("ERROR CUDA Device %d can't access CUDA Device %d ensure that this devices supports P2P communication\n", i, i - 1);
}
}
}
}
printf("------------------------\n");
printf("Building block matrix...\n");
printf("------------------------\n");
for (int i = 0; i < devcount; i++){
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxThreadsPerBlock, i);
deviceGrid[i][0] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxBlockDimX, i);
deviceGrid[i][1] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxBlockDimY, i);
deviceGrid[i][2] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxBlockDimZ, i);
deviceGrid[i][3] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxGridDimX, i);
deviceGrid[i][4] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxGridDimY, i);
deviceGrid[i][5] = attrVal;
hipDeviceGetAttribute(&attrVal, hipDeviceAttributeMaxGridDimZ, i);
deviceGrid[i][6] = attrVal;
}
for (int i = 0; i < devcount; i++) {
printf("Device %d has the following properties\n", i);
printf("Max Threads Per Block: %d\n", deviceGrid[i][0]);
printf("Max Blocks X: %d\n", deviceGrid[i][1]);
printf("Max Blocks Y: %d\n", deviceGrid[i][2]);
printf("Max Blocks Z: %d\n", deviceGrid[i][3]);
printf("Max Grid X: %d\n", deviceGrid[i][4]);
printf("Max Grid Y: %d\n", deviceGrid[i][5]);
printf("Max Grid Z: %d\n", deviceGrid[i][6]);
printf("----------------------------------\n");
}
} | .text
.file "cuda_test.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
leaq -44(%rbp), %rdi
callq hipGetDeviceCount
movl -44(%rbp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movl $.Lstr.8, %edi
callq puts@PLT
movq %rsp, -64(%rbp) # 8-byte Spill
movl -44(%rbp), %eax
movq %rsp, %r13
shlq $5, %rax
subq %rax, %r13
movq %r13, %rsp
xorl %edi, %edi
callq hipSetDevice
leaq -52(%rbp), %rdi
callq hipGetDevice
movl -52(%rbp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl $.Lstr.8, %edi
callq puts@PLT
cmpl $2, -44(%rbp)
jl .LBB0_16
# %bb.1:
movl $.Lstr.2, %edi
callq puts@PLT
cmpl $2, -44(%rbp)
jl .LBB0_4
# %bb.2: # %.lr.ph.preheader
movl $1, %ebx
leaq -52(%rbp), %r14
.p2align 4, 0x90
.LBB0_3: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl %ebx, %edi
callq hipSetDevice
movq %r14, %rdi
callq hipGetDevice
movl -52(%rbp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl $.Lstr.8, %edi
callq puts@PLT
incl %ebx
cmpl -44(%rbp), %ebx
jl .LBB0_3
.LBB0_4: # %._crit_edge
movl $.Lstr.3, %edi
callq puts@PLT
movl -44(%rbp), %eax
testl %eax, %eax
jle .LBB0_16
# %bb.5: # %.lr.ph61.preheader
leaq -56(%rbp), %rbx
xorl %r15d, %r15d
jmp .LBB0_6
.p2align 4, 0x90
.LBB0_8: # in Loop: Header=BB0_6 Depth=1
movl $.L.str.5, %edi
.LBB0_9: # in Loop: Header=BB0_6 Depth=1
movl %r15d, %esi
movl %r14d, %edx
.LBB0_15: # in Loop: Header=BB0_6 Depth=1
xorl %eax, %eax
callq printf
movl -44(%rbp), %eax
movl %r14d, %r15d
cmpl %eax, %r14d
jge .LBB0_16
.LBB0_6: # %.lr.ph61
# =>This Inner Loop Header: Depth=1
leal 1(%r15), %r14d
cmpl %eax, %r14d
jge .LBB0_11
# %bb.7: # in Loop: Header=BB0_6 Depth=1
movq %rbx, %rdi
movl %r15d, %esi
movl %r14d, %edx
callq hipDeviceCanAccessPeer
cmpl $1, -56(%rbp)
je .LBB0_8
# %bb.10: # in Loop: Header=BB0_6 Depth=1
movl $.L.str.6, %edi
jmp .LBB0_9
.p2align 4, 0x90
.LBB0_11: # in Loop: Header=BB0_6 Depth=1
leal -1(%r15), %r12d
movq %rbx, %rdi
movl %r15d, %esi
movl %r12d, %edx
callq hipDeviceCanAccessPeer
cmpl $1, -56(%rbp)
jne .LBB0_13
# %bb.12: # in Loop: Header=BB0_6 Depth=1
movl $.L.str.5, %edi
jmp .LBB0_14
.p2align 4, 0x90
.LBB0_13: # in Loop: Header=BB0_6 Depth=1
movl $.L.str.6, %edi
.LBB0_14: # in Loop: Header=BB0_6 Depth=1
movl %r15d, %esi
movl %r12d, %edx
jmp .LBB0_15
.LBB0_16: # %.loopexit
movl $.Lstr.8, %edi
callq puts@PLT
movl $.Lstr.5, %edi
callq puts@PLT
movl $.Lstr.8, %edi
callq puts@PLT
cmpl $0, -44(%rbp)
jle .LBB0_19
# %bb.17: # %.lr.ph64.preheader
leaq 24(%r13), %r15
xorl %ebx, %ebx
leaq -48(%rbp), %r14
.p2align 4, 0x90
.LBB0_18: # %.lr.ph64
# =>This Inner Loop Header: Depth=1
movq %r14, %rdi
movl $56, %esi
movl %ebx, %edx
callq hipDeviceGetAttribute
movl -48(%rbp), %eax
movl %eax, -24(%r15)
movq %r14, %rdi
movl $26, %esi
movl %ebx, %edx
callq hipDeviceGetAttribute
movl -48(%rbp), %eax
movl %eax, -20(%r15)
movq %r14, %rdi
movl $27, %esi
movl %ebx, %edx
callq hipDeviceGetAttribute
movl -48(%rbp), %eax
movl %eax, -16(%r15)
movq %r14, %rdi
movl $28, %esi
movl %ebx, %edx
callq hipDeviceGetAttribute
movl -48(%rbp), %eax
movl %eax, -12(%r15)
movq %r14, %rdi
movl $29, %esi
movl %ebx, %edx
callq hipDeviceGetAttribute
movl -48(%rbp), %eax
movl %eax, -8(%r15)
movq %r14, %rdi
movl $30, %esi
movl %ebx, %edx
callq hipDeviceGetAttribute
movl -48(%rbp), %eax
movl %eax, -4(%r15)
movq %r14, %rdi
movl $31, %esi
movl %ebx, %edx
callq hipDeviceGetAttribute
movl -48(%rbp), %eax
movl %eax, (%r15)
incq %rbx
movslq -44(%rbp), %rax
addq $32, %r15
cmpq %rax, %rbx
jl .LBB0_18
.LBB0_19: # %.preheader
cmpl $0, -44(%rbp)
jle .LBB0_22
# %bb.20: # %.lr.ph66.preheader
addq $24, %r13
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB0_21: # %.lr.ph66
# =>This Inner Loop Header: Depth=1
movl $.L.str.8, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
movl -24(%r13), %esi
movl $.L.str.9, %edi
xorl %eax, %eax
callq printf
movl -20(%r13), %esi
movl $.L.str.10, %edi
xorl %eax, %eax
callq printf
movl -16(%r13), %esi
movl $.L.str.11, %edi
xorl %eax, %eax
callq printf
movl -12(%r13), %esi
movl $.L.str.12, %edi
xorl %eax, %eax
callq printf
movl -8(%r13), %esi
movl $.L.str.13, %edi
xorl %eax, %eax
callq printf
movl -4(%r13), %esi
movl $.L.str.14, %edi
xorl %eax, %eax
callq printf
movl (%r13), %esi
movl $.L.str.15, %edi
xorl %eax, %eax
callq printf
movl $.Lstr.7, %edi
callq puts@PLT
incq %rbx
movslq -44(%rbp), %rax
addq $32, %r13
cmpq %rax, %rbx
jl .LBB0_21
.LBB0_22: # %._crit_edge67
movq -64(%rbp), %rsp # 8-byte Reload
xorl %eax, %eax
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
.cfi_def_cfa %rsp, 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "This machine has %d CUDA capable GPUs\n"
.size .L.str, 39
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "This machine is currently using device %d\n"
.size .L.str.2, 43
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "CUDA Device %d can access CUDA Device %d\n"
.size .L.str.5, 42
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "ERROR CUDA Device %d can't access CUDA Device %d ensure that this devices supports P2P communication\n"
.size .L.str.6, 102
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Device %d has the following properties\n"
.size .L.str.8, 40
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "Max Threads Per Block: %d\n"
.size .L.str.9, 27
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Max Blocks X: %d\n"
.size .L.str.10, 18
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "Max Blocks Y: %d\n"
.size .L.str.11, 18
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "Max Blocks Z: %d\n"
.size .L.str.12, 18
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz "Max Grid X: %d\n"
.size .L.str.13, 16
.type .L.str.14,@object # @.str.14
.L.str.14:
.asciz "Max Grid Y: %d\n"
.size .L.str.14, 16
.type .L.str.15,@object # @.str.15
.L.str.15:
.asciz "Max Grid Z: %d\n"
.size .L.str.15, 16
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr.2,@object # @str.2
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr.2:
.asciz "Changing device in use....."
.size .Lstr.2, 28
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Checking cross talk between devices....."
.size .Lstr.3, 41
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "Building block matrix..."
.size .Lstr.5, 25
.type .Lstr.7,@object # @str.7
.Lstr.7:
.asciz "----------------------------------"
.size .Lstr.7, 35
.type .Lstr.8,@object # @str.8
.Lstr.8:
.asciz "------------------------"
.size .Lstr.8, 25
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0015e627_00000000-6_cuda_test.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "This machine has %d CUDA capable GPUs\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "------------------------\n"
.section .rodata.str1.8
.align 8
.LC2:
.string "This machine is currently using device %d\n"
.section .rodata.str1.1
.LC3:
.string "Changing device in use.....\n"
.section .rodata.str1.8
.align 8
.LC4:
.string "Checking cross talk between devices.....\n"
.align 8
.LC5:
.string "CUDA Device %d can access CUDA Device %d\n"
.align 8
.LC6:
.string "ERROR CUDA Device %d can't access CUDA Device %d ensure that this devices supports P2P communication\n"
.section .rodata.str1.1
.LC7:
.string "Building block matrix...\n"
.section .rodata.str1.8
.align 8
.LC8:
.string "Device %d has the following properties\n"
.section .rodata.str1.1
.LC9:
.string "Max Threads Per Block: %d\n"
.LC10:
.string "Max Blocks X: %d\n"
.LC11:
.string "Max Blocks Y: %d\n"
.LC12:
.string "Max Blocks Z: %d\n"
.LC13:
.string "Max Grid X: %d\n"
.LC14:
.string "Max Grid Y: %d\n"
.LC15:
.string "Max Grid Z: %d\n"
.section .rodata.str1.8
.align 8
.LC16:
.string "----------------------------------\n"
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $56, %rsp
.cfi_offset 15, -24
.cfi_offset 14, -32
.cfi_offset 13, -40
.cfi_offset 12, -48
.cfi_offset 3, -56
movq %fs:40, %rax
movq %rax, -56(%rbp)
xorl %eax, %eax
leaq -72(%rbp), %rdi
call cudaGetDeviceCount@PLT
movl -72(%rbp), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movslq -72(%rbp), %rax
salq $5, %rax
movq %rax, %rcx
andq $-4096, %rcx
movq %rsp, %rdx
subq %rcx, %rdx
.L4:
cmpq %rdx, %rsp
je .L5
subq $4096, %rsp
orq $0, 4088(%rsp)
jmp .L4
.L5:
movq %rax, %rdx
andl $4095, %edx
subq %rdx, %rsp
testq %rdx, %rdx
je .L6
orq $0, -8(%rsp,%rdx)
.L6:
movq %rsp, -88(%rbp)
movl $0, %edi
call cudaSetDevice@PLT
leaq -68(%rbp), %rdi
call cudaGetDevice@PLT
movl -68(%rbp), %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $1, -72(%rbp)
jg .L24
.L7:
leaq .LC1(%rip), %rbx
movq %rbx, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, -72(%rbp)
jle .L15
movq -88(%rbp), %r13
movq %r13, %rbx
movl $0, %r12d
leaq -64(%rbp), %r14
.L16:
movl %r12d, %edx
movl $1, %esi
movq %r14, %rdi
call cudaDeviceGetAttribute@PLT
movl -64(%rbp), %eax
movl %eax, 0(%r13)
movl %r12d, %edx
movl $2, %esi
movq %r14, %rdi
call cudaDeviceGetAttribute@PLT
movl -64(%rbp), %eax
movl %eax, 4(%r13)
movl %r12d, %edx
movl $3, %esi
movq %r14, %rdi
call cudaDeviceGetAttribute@PLT
movl -64(%rbp), %eax
movl %eax, 8(%r13)
movl %r12d, %edx
movl $4, %esi
movq %r14, %rdi
call cudaDeviceGetAttribute@PLT
movl -64(%rbp), %eax
movl %eax, 12(%r13)
movl %r12d, %edx
movl $5, %esi
movq %r14, %rdi
call cudaDeviceGetAttribute@PLT
movl -64(%rbp), %eax
movl %eax, 16(%r13)
movl %r12d, %edx
movl $6, %esi
movq %r14, %rdi
call cudaDeviceGetAttribute@PLT
movl -64(%rbp), %eax
movl %eax, 20(%r13)
movl %r12d, %edx
movl $7, %esi
movq %r14, %rdi
call cudaDeviceGetAttribute@PLT
movl -64(%rbp), %eax
movl %eax, 24(%r13)
addl $1, %r12d
movl -72(%rbp), %eax
addq $32, %r13
cmpl %r12d, %eax
jg .L16
testl %eax, %eax
jle .L15
movl $0, %r12d
leaq .LC8(%rip), %r15
leaq .LC9(%rip), %r14
leaq .LC10(%rip), %r13
.L17:
movl %r12d, %edx
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl (%rbx), %edx
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 4(%rbx), %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 8(%rbx), %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 12(%rbx), %edx
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 16(%rbx), %edx
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 20(%rbx), %edx
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 24(%rbx), %edx
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r12d
addq $32, %rbx
cmpl %r12d, -72(%rbp)
jg .L17
.L15:
movq -56(%rbp), %rax
subq %fs:40, %rax
jne .L25
movl $0, %eax
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
.cfi_remember_state
.cfi_def_cfa 7, 8
ret
.L24:
.cfi_restore_state
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $1, -72(%rbp)
jle .L8
movl $1, %ebx
leaq -68(%rbp), %r14
leaq .LC2(%rip), %r13
leaq .LC1(%rip), %r12
.L9:
movl %ebx, %edi
call cudaSetDevice@PLT
movq %r14, %rdi
call cudaGetDevice@PLT
movl -68(%rbp), %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
cmpl %ebx, -72(%rbp)
jg .L9
.L8:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl -72(%rbp), %eax
testl %eax, %eax
jle .L7
movl $0, %ebx
leaq -60(%rbp), %r14
leaq .LC6(%rip), %r15
jmp .L14
.L27:
movl %ebx, %ecx
movl %r12d, %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L12
.L10:
leal -1(%r12), %r13d
movl %r13d, %edx
movl %r12d, %esi
movq %r14, %rdi
call cudaDeviceCanAccessPeer@PLT
cmpl $1, -60(%rbp)
je .L26
movl %r13d, %ecx
movl %r12d, %edx
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L12:
movl -72(%rbp), %eax
cmpl %eax, %ebx
jge .L7
.L14:
movl %ebx, %r12d
addl $1, %ebx
cmpl %eax, %ebx
jge .L10
movl %ebx, %edx
movl %r12d, %esi
movq %r14, %rdi
call cudaDeviceCanAccessPeer@PLT
cmpl $1, -60(%rbp)
je .L27
movl %ebx, %ecx
movl %r12d, %edx
movq %r15, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L12
.L26:
movl %r13d, %ecx
movl %r12d, %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L12
.L25:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cuda_test.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
leaq -44(%rbp), %rdi
callq hipGetDeviceCount
movl -44(%rbp), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movl $.Lstr.8, %edi
callq puts@PLT
movq %rsp, -64(%rbp) # 8-byte Spill
movl -44(%rbp), %eax
movq %rsp, %r13
shlq $5, %rax
subq %rax, %r13
movq %r13, %rsp
xorl %edi, %edi
callq hipSetDevice
leaq -52(%rbp), %rdi
callq hipGetDevice
movl -52(%rbp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl $.Lstr.8, %edi
callq puts@PLT
cmpl $2, -44(%rbp)
jl .LBB0_16
# %bb.1:
movl $.Lstr.2, %edi
callq puts@PLT
cmpl $2, -44(%rbp)
jl .LBB0_4
# %bb.2: # %.lr.ph.preheader
movl $1, %ebx
leaq -52(%rbp), %r14
.p2align 4, 0x90
.LBB0_3: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl %ebx, %edi
callq hipSetDevice
movq %r14, %rdi
callq hipGetDevice
movl -52(%rbp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl $.Lstr.8, %edi
callq puts@PLT
incl %ebx
cmpl -44(%rbp), %ebx
jl .LBB0_3
.LBB0_4: # %._crit_edge
movl $.Lstr.3, %edi
callq puts@PLT
movl -44(%rbp), %eax
testl %eax, %eax
jle .LBB0_16
# %bb.5: # %.lr.ph61.preheader
leaq -56(%rbp), %rbx
xorl %r15d, %r15d
jmp .LBB0_6
.p2align 4, 0x90
.LBB0_8: # in Loop: Header=BB0_6 Depth=1
movl $.L.str.5, %edi
.LBB0_9: # in Loop: Header=BB0_6 Depth=1
movl %r15d, %esi
movl %r14d, %edx
.LBB0_15: # in Loop: Header=BB0_6 Depth=1
xorl %eax, %eax
callq printf
movl -44(%rbp), %eax
movl %r14d, %r15d
cmpl %eax, %r14d
jge .LBB0_16
.LBB0_6: # %.lr.ph61
# =>This Inner Loop Header: Depth=1
leal 1(%r15), %r14d
cmpl %eax, %r14d
jge .LBB0_11
# %bb.7: # in Loop: Header=BB0_6 Depth=1
movq %rbx, %rdi
movl %r15d, %esi
movl %r14d, %edx
callq hipDeviceCanAccessPeer
cmpl $1, -56(%rbp)
je .LBB0_8
# %bb.10: # in Loop: Header=BB0_6 Depth=1
movl $.L.str.6, %edi
jmp .LBB0_9
.p2align 4, 0x90
.LBB0_11: # in Loop: Header=BB0_6 Depth=1
leal -1(%r15), %r12d
movq %rbx, %rdi
movl %r15d, %esi
movl %r12d, %edx
callq hipDeviceCanAccessPeer
cmpl $1, -56(%rbp)
jne .LBB0_13
# %bb.12: # in Loop: Header=BB0_6 Depth=1
movl $.L.str.5, %edi
jmp .LBB0_14
.p2align 4, 0x90
.LBB0_13: # in Loop: Header=BB0_6 Depth=1
movl $.L.str.6, %edi
.LBB0_14: # in Loop: Header=BB0_6 Depth=1
movl %r15d, %esi
movl %r12d, %edx
jmp .LBB0_15
.LBB0_16: # %.loopexit
movl $.Lstr.8, %edi
callq puts@PLT
movl $.Lstr.5, %edi
callq puts@PLT
movl $.Lstr.8, %edi
callq puts@PLT
cmpl $0, -44(%rbp)
jle .LBB0_19
# %bb.17: # %.lr.ph64.preheader
leaq 24(%r13), %r15
xorl %ebx, %ebx
leaq -48(%rbp), %r14
.p2align 4, 0x90
.LBB0_18: # %.lr.ph64
# =>This Inner Loop Header: Depth=1
movq %r14, %rdi
movl $56, %esi
movl %ebx, %edx
callq hipDeviceGetAttribute
movl -48(%rbp), %eax
movl %eax, -24(%r15)
movq %r14, %rdi
movl $26, %esi
movl %ebx, %edx
callq hipDeviceGetAttribute
movl -48(%rbp), %eax
movl %eax, -20(%r15)
movq %r14, %rdi
movl $27, %esi
movl %ebx, %edx
callq hipDeviceGetAttribute
movl -48(%rbp), %eax
movl %eax, -16(%r15)
movq %r14, %rdi
movl $28, %esi
movl %ebx, %edx
callq hipDeviceGetAttribute
movl -48(%rbp), %eax
movl %eax, -12(%r15)
movq %r14, %rdi
movl $29, %esi
movl %ebx, %edx
callq hipDeviceGetAttribute
movl -48(%rbp), %eax
movl %eax, -8(%r15)
movq %r14, %rdi
movl $30, %esi
movl %ebx, %edx
callq hipDeviceGetAttribute
movl -48(%rbp), %eax
movl %eax, -4(%r15)
movq %r14, %rdi
movl $31, %esi
movl %ebx, %edx
callq hipDeviceGetAttribute
movl -48(%rbp), %eax
movl %eax, (%r15)
incq %rbx
movslq -44(%rbp), %rax
addq $32, %r15
cmpq %rax, %rbx
jl .LBB0_18
.LBB0_19: # %.preheader
cmpl $0, -44(%rbp)
jle .LBB0_22
# %bb.20: # %.lr.ph66.preheader
addq $24, %r13
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB0_21: # %.lr.ph66
# =>This Inner Loop Header: Depth=1
movl $.L.str.8, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
movl -24(%r13), %esi
movl $.L.str.9, %edi
xorl %eax, %eax
callq printf
movl -20(%r13), %esi
movl $.L.str.10, %edi
xorl %eax, %eax
callq printf
movl -16(%r13), %esi
movl $.L.str.11, %edi
xorl %eax, %eax
callq printf
movl -12(%r13), %esi
movl $.L.str.12, %edi
xorl %eax, %eax
callq printf
movl -8(%r13), %esi
movl $.L.str.13, %edi
xorl %eax, %eax
callq printf
movl -4(%r13), %esi
movl $.L.str.14, %edi
xorl %eax, %eax
callq printf
movl (%r13), %esi
movl $.L.str.15, %edi
xorl %eax, %eax
callq printf
movl $.Lstr.7, %edi
callq puts@PLT
incq %rbx
movslq -44(%rbp), %rax
addq $32, %r13
cmpq %rax, %rbx
jl .LBB0_21
.LBB0_22: # %._crit_edge67
movq -64(%rbp), %rsp # 8-byte Reload
xorl %eax, %eax
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
.cfi_def_cfa %rsp, 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "This machine has %d CUDA capable GPUs\n"
.size .L.str, 39
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "This machine is currently using device %d\n"
.size .L.str.2, 43
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "CUDA Device %d can access CUDA Device %d\n"
.size .L.str.5, 42
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "ERROR CUDA Device %d can't access CUDA Device %d ensure that this devices supports P2P communication\n"
.size .L.str.6, 102
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Device %d has the following properties\n"
.size .L.str.8, 40
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "Max Threads Per Block: %d\n"
.size .L.str.9, 27
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Max Blocks X: %d\n"
.size .L.str.10, 18
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "Max Blocks Y: %d\n"
.size .L.str.11, 18
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "Max Blocks Z: %d\n"
.size .L.str.12, 18
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz "Max Grid X: %d\n"
.size .L.str.13, 16
.type .L.str.14,@object # @.str.14
.L.str.14:
.asciz "Max Grid Y: %d\n"
.size .L.str.14, 16
.type .L.str.15,@object # @.str.15
.L.str.15:
.asciz "Max Grid Z: %d\n"
.size .L.str.15, 16
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr.2,@object # @str.2
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr.2:
.asciz "Changing device in use....."
.size .Lstr.2, 28
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Checking cross talk between devices....."
.size .Lstr.3, 41
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "Building block matrix..."
.size .Lstr.5, 25
.type .Lstr.7,@object # @str.7
.Lstr.7:
.asciz "----------------------------------"
.size .Lstr.7, 35
.type .Lstr.8,@object # @str.8
.Lstr.8:
.asciz "------------------------"
.size .Lstr.8, 25
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | __device__ void _sum_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sum_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sum_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sum_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sum_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float sum_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
_sum_32_20_1<<<64,64>>>(n,x,y);
_sum_32_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sum_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sum_64_20_1(int n, double *x, double *y) {
  // Stage 1: each block sums a strided slice of x[0..n) and writes its
  // partial result to y[blockIdx.x].
  __shared__ double buffer[64];         // one slot per thread
  int t = threadIdx.x;
  int step = blockDim.x * gridDim.x;    // total threads in the grid
  double acc = 0;
  for (int i = t + blockIdx.x * blockDim.x; i < n; i += step) {
    acc = acc + x[i];
  }
  buffer[t] = acc;
  __syncthreads();
  // Tree reduction across the block; with 64 threads it runs zero
  // iterations (64/2 = 32 and 32 > 32 is false).
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      double a = buffer[t], b = buffer[stride + t];
      buffer[t] = a + b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _sum_64_20_0(buffer, t);  // warp-level 64 -> 1 reduction (volatile)
  }
  __syncthreads();
  if (t == 0) {
    y[blockIdx.x] = buffer[0];  // block result
  }
}
__global__ void _sum_64_20_2(double *y,double *z) {
  // Stage 2: one 64-thread block folds the 64 per-block partial sums in y
  // into a single scalar written to z[0].
  __shared__ double buffer[64];
  int t = threadIdx.x;
  buffer[t] = y[t];
  __syncthreads();
  // Zero iterations for 64 threads (32 > 32 is false); kept for parity.
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      double a = buffer[t], b = buffer[stride + t];
      buffer[t] = a + b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _sum_64_20_0(buffer, t);
  }
  __syncthreads();
  if (t == 0) {
    z[0] = buffer[0];
  }
}
#ifdef __cplusplus
extern "C" {
#endif
double sum_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
_sum_64_20_1<<<64,64>>>(n,x,y);
_sum_64_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _prod_32_20_0(volatile float *x, int i) {
  // Warp-level tail of the product reduction: lane i folds x[i+off] into
  // x[i] for off = 32,16,8,4,2,1. volatile forces real shared-memory
  // accesses; register caching would read stale partials.
  for (int off = 32; off >= 1; off >>= 1) {
    float a = x[i];
    float b = x[i + off];
    x[i] = a * b;
  }
}
__global__ void _prod_32_20_1(int n, float *x, float *y) {
  // Stage 1: each block multiplies together a strided slice of x[0..n)
  // and writes its partial product to y[blockIdx.x].
  __shared__ float buffer[64];          // one slot per thread
  int t = threadIdx.x;
  int step = blockDim.x * gridDim.x;    // total threads in the grid
  float acc = 1;                        // multiplicative identity
  for (int i = t + blockIdx.x * blockDim.x; i < n; i += step) {
    acc = acc * x[i];
  }
  buffer[t] = acc;
  __syncthreads();
  // Zero iterations for 64 threads (64/2 = 32 and 32 > 32 is false).
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      float a = buffer[t], b = buffer[stride + t];
      buffer[t] = a * b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _prod_32_20_0(buffer, t);  // warp-level 64 -> 1 reduction (volatile)
  }
  __syncthreads();
  if (t == 0) {
    y[blockIdx.x] = buffer[0];  // block result
  }
}
__global__ void _prod_32_20_2(float *y,float *z) {
  // Stage 2: one 64-thread block folds the 64 per-block partial products
  // in y into a single scalar written to z[0].
  __shared__ float buffer[64];
  int t = threadIdx.x;
  buffer[t] = y[t];
  __syncthreads();
  // Zero iterations for 64 threads (32 > 32 is false); kept for parity.
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      float a = buffer[t], b = buffer[stride + t];
      buffer[t] = a * b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _prod_32_20_0(buffer, t);
  }
  __syncthreads();
  if (t == 0) {
    z[0] = buffer[0];
  }
}
#ifdef __cplusplus
extern "C" {
#endif
float prod_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
_prod_32_20_1<<<64,64>>>(n,x,y);
_prod_32_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _prod_64_20_0(volatile double *x, int i) {
  // Warp-level tail of the product reduction: lane i folds x[i+off] into
  // x[i] for off = 32,16,8,4,2,1. volatile forces real shared-memory
  // accesses; register caching would read stale partials.
  for (int off = 32; off >= 1; off >>= 1) {
    double a = x[i];
    double b = x[i + off];
    x[i] = a * b;
  }
}
__global__ void _prod_64_20_1(int n, double *x, double *y) {
  // Stage 1: each block multiplies together a strided slice of x[0..n)
  // and writes its partial product to y[blockIdx.x].
  __shared__ double buffer[64];         // one slot per thread
  int t = threadIdx.x;
  int step = blockDim.x * gridDim.x;    // total threads in the grid
  double acc = 1;                       // multiplicative identity
  for (int i = t + blockIdx.x * blockDim.x; i < n; i += step) {
    acc = acc * x[i];
  }
  buffer[t] = acc;
  __syncthreads();
  // Zero iterations for 64 threads (64/2 = 32 and 32 > 32 is false).
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      double a = buffer[t], b = buffer[stride + t];
      buffer[t] = a * b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _prod_64_20_0(buffer, t);  // warp-level 64 -> 1 reduction (volatile)
  }
  __syncthreads();
  if (t == 0) {
    y[blockIdx.x] = buffer[0];  // block result
  }
}
__global__ void _prod_64_20_2(double *y,double *z) {
  // Stage 2: one 64-thread block folds the 64 per-block partial products
  // in y into a single scalar written to z[0].
  __shared__ double buffer[64];
  int t = threadIdx.x;
  buffer[t] = y[t];
  __syncthreads();
  // Zero iterations for 64 threads (32 > 32 is false); kept for parity.
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      double a = buffer[t], b = buffer[stride + t];
      buffer[t] = a * b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _prod_64_20_0(buffer, t);
  }
  __syncthreads();
  if (t == 0) {
    z[0] = buffer[0];
  }
}
#ifdef __cplusplus
extern "C" {
#endif
double prod_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
_prod_64_20_1<<<64,64>>>(n,x,y);
_prod_64_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _maximum_32_20_0(volatile float *x, int i) {
  // Warp-level tail of the max reduction: lane i keeps the larger of x[i]
  // and x[i+off] for off = 32,16,8,4,2,1. volatile forces real
  // shared-memory accesses; register caching would read stale partials.
  for (int off = 32; off >= 1; off >>= 1) {
    float a = x[i];
    float b = x[i + off];
    x[i] = (a > b ? a : b);
  }
}
__global__ void _maximum_32_20_1(int n, float *x, float *y) {
  // Stage 1: each block takes the max over a strided slice of x[0..n)
  // and writes its partial max to y[blockIdx.x].
  __shared__ float buffer[64];          // one slot per thread
  int t = threadIdx.x;
  int step = blockDim.x * gridDim.x;    // total threads in the grid
  float acc = (-INFINITY);              // identity for max
  for (int i = t + blockIdx.x * blockDim.x; i < n; i += step) {
    float v = x[i];
    acc = (acc > v ? acc : v);
  }
  buffer[t] = acc;
  __syncthreads();
  // Zero iterations for 64 threads (64/2 = 32 and 32 > 32 is false).
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      float a = buffer[t], b = buffer[stride + t];
      buffer[t] = (a > b ? a : b);
    }
    __syncthreads();
  }
  if (t < 32) {
    _maximum_32_20_0(buffer, t);  // warp-level 64 -> 1 reduction (volatile)
  }
  __syncthreads();
  if (t == 0) {
    y[blockIdx.x] = buffer[0];  // block result
  }
}
__global__ void _maximum_32_20_2(float *y,float *z) {
  // Stage 2: one 64-thread block folds the 64 per-block partial maxima
  // in y into a single scalar written to z[0].
  __shared__ float buffer[64];
  int t = threadIdx.x;
  buffer[t] = y[t];
  __syncthreads();
  // Zero iterations for 64 threads (32 > 32 is false); kept for parity.
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      float a = buffer[t], b = buffer[stride + t];
      buffer[t] = (a > b ? a : b);
    }
    __syncthreads();
  }
  if (t < 32) {
    _maximum_32_20_0(buffer, t);
  }
  __syncthreads();
  if (t == 0) {
    z[0] = buffer[0];
  }
}
#ifdef __cplusplus
extern "C" {
#endif
float maximum_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
_maximum_32_20_1<<<64,64>>>(n,x,y);
_maximum_32_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _maximum_64_20_0(volatile double *x, int i) {
  // Warp-level tail of the max reduction: lane i keeps the larger of x[i]
  // and x[i+off] for off = 32,16,8,4,2,1. volatile forces real
  // shared-memory accesses; register caching would read stale partials.
  for (int off = 32; off >= 1; off >>= 1) {
    double a = x[i];
    double b = x[i + off];
    x[i] = (a > b ? a : b);
  }
}
__global__ void _maximum_64_20_1(int n, double *x, double *y) {
  // Stage 1: each block takes the max over a strided slice of x[0..n)
  // and writes its partial max to y[blockIdx.x].
  __shared__ double buffer[64];         // one slot per thread
  int t = threadIdx.x;
  int step = blockDim.x * gridDim.x;    // total threads in the grid
  double acc = (-INFINITY);             // identity for max
  for (int i = t + blockIdx.x * blockDim.x; i < n; i += step) {
    double v = x[i];
    acc = (acc > v ? acc : v);
  }
  buffer[t] = acc;
  __syncthreads();
  // Zero iterations for 64 threads (64/2 = 32 and 32 > 32 is false).
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      double a = buffer[t], b = buffer[stride + t];
      buffer[t] = (a > b ? a : b);
    }
    __syncthreads();
  }
  if (t < 32) {
    _maximum_64_20_0(buffer, t);  // warp-level 64 -> 1 reduction (volatile)
  }
  __syncthreads();
  if (t == 0) {
    y[blockIdx.x] = buffer[0];  // block result
  }
}
__global__ void _maximum_64_20_2(double *y,double *z) {
  // Stage 2: one 64-thread block folds the 64 per-block partial maxima
  // in y into a single scalar written to z[0].
  __shared__ double buffer[64];
  int t = threadIdx.x;
  buffer[t] = y[t];
  __syncthreads();
  // Zero iterations for 64 threads (32 > 32 is false); kept for parity.
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      double a = buffer[t], b = buffer[stride + t];
      buffer[t] = (a > b ? a : b);
    }
    __syncthreads();
  }
  if (t < 32) {
    _maximum_64_20_0(buffer, t);
  }
  __syncthreads();
  if (t == 0) {
    z[0] = buffer[0];
  }
}
#ifdef __cplusplus
extern "C" {
#endif
double maximum_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
_maximum_64_20_1<<<64,64>>>(n,x,y);
_maximum_64_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _minimum_32_20_0(volatile float *x, int i) {
  // Warp-level tail of the min reduction: lane i keeps the smaller of x[i]
  // and x[i+off] for off = 32,16,8,4,2,1. volatile forces real
  // shared-memory accesses; register caching would read stale partials.
  for (int off = 32; off >= 1; off >>= 1) {
    float a = x[i];
    float b = x[i + off];
    x[i] = (a < b ? a : b);
  }
}
__global__ void _minimum_32_20_1(int n, float *x, float *y) {
  // Stage 1: each block takes the min over a strided slice of x[0..n)
  // and writes its partial min to y[blockIdx.x].
  __shared__ float buffer[64];          // one slot per thread
  int t = threadIdx.x;
  int step = blockDim.x * gridDim.x;    // total threads in the grid
  float acc = INFINITY;                 // identity for min
  for (int i = t + blockIdx.x * blockDim.x; i < n; i += step) {
    float v = x[i];
    acc = (acc < v ? acc : v);
  }
  buffer[t] = acc;
  __syncthreads();
  // Zero iterations for 64 threads (64/2 = 32 and 32 > 32 is false).
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      float a = buffer[t], b = buffer[stride + t];
      buffer[t] = (a < b ? a : b);
    }
    __syncthreads();
  }
  if (t < 32) {
    _minimum_32_20_0(buffer, t);  // warp-level 64 -> 1 reduction (volatile)
  }
  __syncthreads();
  if (t == 0) {
    y[blockIdx.x] = buffer[0];  // block result
  }
}
__global__ void _minimum_32_20_2(float *y,float *z) {
  // Stage 2: one 64-thread block folds the 64 per-block partial minima
  // in y into a single scalar written to z[0].
  __shared__ float buffer[64];
  int t = threadIdx.x;
  buffer[t] = y[t];
  __syncthreads();
  // Zero iterations for 64 threads (32 > 32 is false); kept for parity.
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      float a = buffer[t], b = buffer[stride + t];
      buffer[t] = (a < b ? a : b);
    }
    __syncthreads();
  }
  if (t < 32) {
    _minimum_32_20_0(buffer, t);
  }
  __syncthreads();
  if (t == 0) {
    z[0] = buffer[0];
  }
}
#ifdef __cplusplus
extern "C" {
#endif
float minimum_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
_minimum_32_20_1<<<64,64>>>(n,x,y);
_minimum_32_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _minimum_64_20_0(volatile double *x, int i) {
  // Warp-level tail of the min reduction: lane i keeps the smaller of x[i]
  // and x[i+off] for off = 32,16,8,4,2,1. volatile forces real
  // shared-memory accesses; register caching would read stale partials.
  for (int off = 32; off >= 1; off >>= 1) {
    double a = x[i];
    double b = x[i + off];
    x[i] = (a < b ? a : b);
  }
}
__global__ void _minimum_64_20_1(int n, double *x, double *y) {
  // Stage 1: each block takes the min over a strided slice of x[0..n)
  // and writes its partial min to y[blockIdx.x].
  __shared__ double buffer[64];         // one slot per thread
  int t = threadIdx.x;
  int step = blockDim.x * gridDim.x;    // total threads in the grid
  double acc = INFINITY;                // identity for min
  for (int i = t + blockIdx.x * blockDim.x; i < n; i += step) {
    double v = x[i];
    acc = (acc < v ? acc : v);
  }
  buffer[t] = acc;
  __syncthreads();
  // Zero iterations for 64 threads (64/2 = 32 and 32 > 32 is false).
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      double a = buffer[t], b = buffer[stride + t];
      buffer[t] = (a < b ? a : b);
    }
    __syncthreads();
  }
  if (t < 32) {
    _minimum_64_20_0(buffer, t);  // warp-level 64 -> 1 reduction (volatile)
  }
  __syncthreads();
  if (t == 0) {
    y[blockIdx.x] = buffer[0];  // block result
  }
}
__global__ void _minimum_64_20_2(double *y,double *z) {
  // Stage 2: one 64-thread block folds the 64 per-block partial minima
  // in y into a single scalar written to z[0].
  __shared__ double buffer[64];
  int t = threadIdx.x;
  buffer[t] = y[t];
  __syncthreads();
  // Zero iterations for 64 threads (32 > 32 is false); kept for parity.
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      double a = buffer[t], b = buffer[stride + t];
      buffer[t] = (a < b ? a : b);
    }
    __syncthreads();
  }
  if (t < 32) {
    _minimum_64_20_0(buffer, t);
  }
  __syncthreads();
  if (t == 0) {
    z[0] = buffer[0];
  }
}
#ifdef __cplusplus
extern "C" {
#endif
double minimum_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
_minimum_64_20_1<<<64,64>>>(n,x,y);
_minimum_64_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sumabs_32_20_0(volatile float *x, int i) {
  // Warp-level tail of the sum reduction (inputs were already mapped to
  // absolute values upstream): lane i folds x[i+off] into x[i] for
  // off = 32,16,8,4,2,1. volatile forces real shared-memory accesses.
  for (int off = 32; off >= 1; off >>= 1) {
    float a = x[i];
    float b = x[i + off];
    x[i] = a + b;
  }
}
__global__ void _sumabs_32_20_1(int n, float *x, float *y) {
  // Stage 1: each block sums |x[i]| over a strided slice of x[0..n)
  // and writes its partial result to y[blockIdx.x].
  __shared__ float buffer[64];          // one slot per thread
  int t = threadIdx.x;
  int step = blockDim.x * gridDim.x;    // total threads in the grid
  float acc = 0;
  for (int i = t + blockIdx.x * blockDim.x; i < n; i += step) {
    float v = x[i];
    acc = acc + (v < 0 ? -v : v);       // accumulate absolute value
  }
  buffer[t] = acc;
  __syncthreads();
  // Zero iterations for 64 threads (64/2 = 32 and 32 > 32 is false).
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      float a = buffer[t], b = buffer[stride + t];
      buffer[t] = a + b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _sumabs_32_20_0(buffer, t);  // warp-level 64 -> 1 reduction (volatile)
  }
  __syncthreads();
  if (t == 0) {
    y[blockIdx.x] = buffer[0];  // block result
  }
}
__global__ void _sumabs_32_20_2(float *y,float *z) {
  // Stage 2: one 64-thread block folds the 64 per-block partial sums of
  // absolute values in y into a single scalar written to z[0].
  __shared__ float buffer[64];
  int t = threadIdx.x;
  buffer[t] = y[t];
  __syncthreads();
  // Zero iterations for 64 threads (32 > 32 is false); kept for parity.
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      float a = buffer[t], b = buffer[stride + t];
      buffer[t] = a + b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _sumabs_32_20_0(buffer, t);
  }
  __syncthreads();
  if (t == 0) {
    z[0] = buffer[0];
  }
}
#ifdef __cplusplus
extern "C" {
#endif
float sumabs_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
_sumabs_32_20_1<<<64,64>>>(n,x,y);
_sumabs_32_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sumabs_64_20_0(volatile double *x, int i) {
  // Warp-level tail of the sum reduction (inputs were already mapped to
  // absolute values upstream): lane i folds x[i+off] into x[i] for
  // off = 32,16,8,4,2,1. volatile forces real shared-memory accesses.
  for (int off = 32; off >= 1; off >>= 1) {
    double a = x[i];
    double b = x[i + off];
    x[i] = a + b;
  }
}
__global__ void _sumabs_64_20_1(int n, double *x, double *y) {
  // Stage 1: each block sums |x[i]| over a strided slice of x[0..n)
  // and writes its partial result to y[blockIdx.x].
  __shared__ double buffer[64];         // one slot per thread
  int t = threadIdx.x;
  int step = blockDim.x * gridDim.x;    // total threads in the grid
  double acc = 0;
  for (int i = t + blockIdx.x * blockDim.x; i < n; i += step) {
    double v = x[i];
    acc = acc + (v < 0 ? -v : v);       // accumulate absolute value
  }
  buffer[t] = acc;
  __syncthreads();
  // Zero iterations for 64 threads (64/2 = 32 and 32 > 32 is false).
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      double a = buffer[t], b = buffer[stride + t];
      buffer[t] = a + b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _sumabs_64_20_0(buffer, t);  // warp-level 64 -> 1 reduction (volatile)
  }
  __syncthreads();
  if (t == 0) {
    y[blockIdx.x] = buffer[0];  // block result
  }
}
__global__ void _sumabs_64_20_2(double *y,double *z) {
  // Stage 2: one 64-thread block folds the 64 per-block partial sums of
  // absolute values in y into a single scalar written to z[0].
  __shared__ double buffer[64];
  int t = threadIdx.x;
  buffer[t] = y[t];
  __syncthreads();
  // Zero iterations for 64 threads (32 > 32 is false); kept for parity.
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      double a = buffer[t], b = buffer[stride + t];
      buffer[t] = a + b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _sumabs_64_20_0(buffer, t);
  }
  __syncthreads();
  if (t == 0) {
    z[0] = buffer[0];
  }
}
#ifdef __cplusplus
extern "C" {
#endif
double sumabs_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
_sumabs_64_20_1<<<64,64>>>(n,x,y);
_sumabs_64_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sumabs2_32_20_0(volatile float *x, int i) {
  // Warp-level tail of the sum reduction (inputs were already squared
  // upstream): lane i folds x[i+off] into x[i] for off = 32,16,8,4,2,1.
  // volatile forces real shared-memory accesses.
  for (int off = 32; off >= 1; off >>= 1) {
    float a = x[i];
    float b = x[i + off];
    x[i] = a + b;
  }
}
__global__ void _sumabs2_32_20_1(int n, float *x, float *y) {
  // Stage 1: each block sums x[i]*x[i] over a strided slice of x[0..n)
  // and writes its partial result to y[blockIdx.x].
  __shared__ float buffer[64];          // one slot per thread
  int t = threadIdx.x;
  int step = blockDim.x * gridDim.x;    // total threads in the grid
  float acc = 0;
  for (int i = t + blockIdx.x * blockDim.x; i < n; i += step) {
    float v = x[i];
    acc = acc + (v * v);                // accumulate squared value
  }
  buffer[t] = acc;
  __syncthreads();
  // Zero iterations for 64 threads (64/2 = 32 and 32 > 32 is false).
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      float a = buffer[t], b = buffer[stride + t];
      buffer[t] = a + b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _sumabs2_32_20_0(buffer, t);  // warp-level 64 -> 1 reduction (volatile)
  }
  __syncthreads();
  if (t == 0) {
    y[blockIdx.x] = buffer[0];  // block result
  }
}
__global__ void _sumabs2_32_20_2(float *y,float *z) {
  // Stage 2: one 64-thread block folds the 64 per-block partial sums of
  // squares in y into a single scalar written to z[0].
  __shared__ float buffer[64];
  int t = threadIdx.x;
  buffer[t] = y[t];
  __syncthreads();
  // Zero iterations for 64 threads (32 > 32 is false); kept for parity.
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      float a = buffer[t], b = buffer[stride + t];
      buffer[t] = a + b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _sumabs2_32_20_0(buffer, t);
  }
  __syncthreads();
  if (t == 0) {
    z[0] = buffer[0];
  }
}
#ifdef __cplusplus
extern "C" {
#endif
float sumabs2_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
_sumabs2_32_20_1<<<64,64>>>(n,x,y);
_sumabs2_32_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sumabs2_64_20_0(volatile double *x, int i) {
  // Warp-level tail of the sum reduction (inputs were already squared
  // upstream): lane i folds x[i+off] into x[i] for off = 32,16,8,4,2,1.
  // volatile forces real shared-memory accesses.
  for (int off = 32; off >= 1; off >>= 1) {
    double a = x[i];
    double b = x[i + off];
    x[i] = a + b;
  }
}
__global__ void _sumabs2_64_20_1(int n, double *x, double *y) {
  // Stage 1: each block sums x[i]*x[i] over a strided slice of x[0..n)
  // and writes its partial result to y[blockIdx.x].
  __shared__ double buffer[64];         // one slot per thread
  int t = threadIdx.x;
  int step = blockDim.x * gridDim.x;    // total threads in the grid
  double acc = 0;
  for (int i = t + blockIdx.x * blockDim.x; i < n; i += step) {
    double v = x[i];
    acc = acc + (v * v);                // accumulate squared value
  }
  buffer[t] = acc;
  __syncthreads();
  // Zero iterations for 64 threads (64/2 = 32 and 32 > 32 is false).
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      double a = buffer[t], b = buffer[stride + t];
      buffer[t] = a + b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _sumabs2_64_20_0(buffer, t);  // warp-level 64 -> 1 reduction (volatile)
  }
  __syncthreads();
  if (t == 0) {
    y[blockIdx.x] = buffer[0];  // block result
  }
}
__global__ void _sumabs2_64_20_2(double *y,double *z) {
  // Stage 2: one 64-thread block folds the 64 per-block partial sums of
  // squares in y into a single scalar written to z[0].
  __shared__ double buffer[64];
  int t = threadIdx.x;
  buffer[t] = y[t];
  __syncthreads();
  // Zero iterations for 64 threads (32 > 32 is false); kept for parity.
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      double a = buffer[t], b = buffer[stride + t];
      buffer[t] = a + b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _sumabs2_64_20_0(buffer, t);
  }
  __syncthreads();
  if (t == 0) {
    z[0] = buffer[0];
  }
}
#ifdef __cplusplus
extern "C" {
#endif
double sumabs2_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
_sumabs2_64_20_1<<<64,64>>>(n,x,y);
_sumabs2_64_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _countnz_32_20_0(volatile float *x, int i) {
  // Warp-level tail of the sum reduction (inputs were already mapped to
  // 0/1 nonzero indicators upstream): lane i folds x[i+off] into x[i] for
  // off = 32,16,8,4,2,1. volatile forces real shared-memory accesses.
  for (int off = 32; off >= 1; off >>= 1) {
    float a = x[i];
    float b = x[i + off];
    x[i] = a + b;
  }
}
__global__ void _countnz_32_20_1(int n, float *x, float *y) {
  // Stage 1: each block counts nonzero entries (as float 0/1 indicators)
  // over a strided slice of x[0..n) and writes the count to y[blockIdx.x].
  __shared__ float buffer[64];          // one slot per thread
  int t = threadIdx.x;
  int step = blockDim.x * gridDim.x;    // total threads in the grid
  float acc = 0;
  for (int i = t + blockIdx.x * blockDim.x; i < n; i += step) {
    float v = x[i];
    acc = acc + (v != 0);               // bool converts to 0.0f / 1.0f
  }
  buffer[t] = acc;
  __syncthreads();
  // Zero iterations for 64 threads (64/2 = 32 and 32 > 32 is false).
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      float a = buffer[t], b = buffer[stride + t];
      buffer[t] = a + b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _countnz_32_20_0(buffer, t);  // warp-level 64 -> 1 reduction (volatile)
  }
  __syncthreads();
  if (t == 0) {
    y[blockIdx.x] = buffer[0];  // block result
  }
}
__global__ void _countnz_32_20_2(float *y,float *z) {
  // Stage 2: one 64-thread block folds the 64 per-block nonzero counts
  // in y into a single scalar written to z[0].
  __shared__ float buffer[64];
  int t = threadIdx.x;
  buffer[t] = y[t];
  __syncthreads();
  // Zero iterations for 64 threads (32 > 32 is false); kept for parity.
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      float a = buffer[t], b = buffer[stride + t];
      buffer[t] = a + b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _countnz_32_20_0(buffer, t);
  }
  __syncthreads();
  if (t == 0) {
    z[0] = buffer[0];
  }
}
#ifdef __cplusplus
extern "C" {
#endif
float countnz_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
_countnz_32_20_1<<<64,64>>>(n,x,y);
_countnz_32_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _countnz_64_20_0(volatile double *x, int i) {
  // Warp-level tail of the sum reduction (inputs were already mapped to
  // 0/1 nonzero indicators upstream): lane i folds x[i+off] into x[i] for
  // off = 32,16,8,4,2,1. volatile forces real shared-memory accesses.
  for (int off = 32; off >= 1; off >>= 1) {
    double a = x[i];
    double b = x[i + off];
    x[i] = a + b;
  }
}
__global__ void _countnz_64_20_1(int n, double *x, double *y) {
  // Stage 1: each block counts nonzero entries (as double 0/1 indicators)
  // over a strided slice of x[0..n) and writes the count to y[blockIdx.x].
  __shared__ double buffer[64];         // one slot per thread
  int t = threadIdx.x;
  int step = blockDim.x * gridDim.x;    // total threads in the grid
  double acc = 0;
  for (int i = t + blockIdx.x * blockDim.x; i < n; i += step) {
    double v = x[i];
    acc = acc + (v != 0);               // bool converts to 0.0 / 1.0
  }
  buffer[t] = acc;
  __syncthreads();
  // Zero iterations for 64 threads (64/2 = 32 and 32 > 32 is false).
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      double a = buffer[t], b = buffer[stride + t];
      buffer[t] = a + b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _countnz_64_20_0(buffer, t);  // warp-level 64 -> 1 reduction (volatile)
  }
  __syncthreads();
  if (t == 0) {
    y[blockIdx.x] = buffer[0];  // block result
  }
}
__global__ void _countnz_64_20_2(double *y,double *z) {
  // Stage 2: one 64-thread block folds the 64 per-block nonzero counts
  // in y into a single scalar written to z[0].
  __shared__ double buffer[64];
  int t = threadIdx.x;
  buffer[t] = y[t];
  __syncthreads();
  // Zero iterations for 64 threads (32 > 32 is false); kept for parity.
  for (int stride = 64/2; stride > 32; stride >>= 1) {
    if (t < stride) {
      double a = buffer[t], b = buffer[stride + t];
      buffer[t] = a + b;
    }
    __syncthreads();
  }
  if (t < 32) {
    _countnz_64_20_0(buffer, t);
  }
  __syncthreads();
  if (t == 0) {
    z[0] = buffer[0];
  }
}
#ifdef __cplusplus
extern "C" {
#endif
double countnz_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
_countnz_64_20_1<<<64,64>>>(n,x,y);
_countnz_64_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif | #include <hip/hip_runtime.h>
__device__ void _sum_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sum_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sum_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sum_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sum_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float sum_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum
_sum_32_20_1<<<64,64>>>(n,x,y);
_sum_32_20_2<<<1,64>>>(y,z);
hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sum_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sum_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sum_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sum_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sum_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double sum_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum
_sum_64_20_1<<<64,64>>>(n,x,y);
_sum_64_20_2<<<1,64>>>(y,z);
hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _prod_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai*xi;
ai=x[i]; xi=x[i+16]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai*xi;
}
__global__ void _prod_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 1;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=ai*xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_prod_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _prod_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi;
}
__syncthreads();
}
if(tid<32) {
_prod_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float prod_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum
_prod_32_20_1<<<64,64>>>(n,x,y);
_prod_32_20_2<<<1,64>>>(y,z);
hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _prod_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai*xi;
ai=x[i]; xi=x[i+16]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai*xi;
}
__global__ void _prod_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 1;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=ai*xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_prod_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _prod_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi;
}
__syncthreads();
}
if(tid<32) {
_prod_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double prod_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum
_prod_64_20_1<<<64,64>>>(n,x,y);
_prod_64_20_2<<<1,64>>>(y,z);
hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _maximum_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi);
}
__global__ void _maximum_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = (-INFINITY);
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=(ai>xi?ai:xi);
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_maximum_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _maximum_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
}
__syncthreads();
}
if(tid<32) {
_maximum_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float maximum_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum
_maximum_32_20_1<<<64,64>>>(n,x,y);
_maximum_32_20_2<<<1,64>>>(y,z);
hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _maximum_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi);
}
__global__ void _maximum_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = (-INFINITY);
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=(ai>xi?ai:xi);
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_maximum_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _maximum_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
}
__syncthreads();
}
if(tid<32) {
_maximum_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double maximum_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum
_maximum_64_20_1<<<64,64>>>(n,x,y);
_maximum_64_20_2<<<1,64>>>(y,z);
hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _minimum_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi);
}
__global__ void _minimum_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = INFINITY;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=(ai<xi?ai:xi);
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_minimum_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _minimum_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
}
__syncthreads();
}
if(tid<32) {
_minimum_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float minimum_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum
_minimum_32_20_1<<<64,64>>>(n,x,y);
_minimum_32_20_2<<<1,64>>>(y,z);
hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _minimum_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi);
}
__global__ void _minimum_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = INFINITY;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=(ai<xi?ai:xi);
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_minimum_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _minimum_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
}
__syncthreads();
}
if(tid<32) {
_minimum_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double minimum_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum
_minimum_64_20_1<<<64,64>>>(n,x,y);
_minimum_64_20_2<<<1,64>>>(y,z);
hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sumabs_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sumabs_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sumabs_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sumabs_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sumabs_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float sumabs_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum
_sumabs_32_20_1<<<64,64>>>(n,x,y);
_sumabs_32_20_2<<<1,64>>>(y,z);
hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sumabs_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sumabs_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sumabs_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sumabs_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sumabs_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double sumabs_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum
_sumabs_64_20_1<<<64,64>>>(n,x,y);
_sumabs_64_20_2<<<1,64>>>(y,z);
hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sumabs2_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sumabs2_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi*xi); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sumabs2_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sumabs2_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sumabs2_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float sumabs2_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum
_sumabs2_32_20_1<<<64,64>>>(n,x,y);
_sumabs2_32_20_2<<<1,64>>>(y,z);
hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _sumabs2_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _sumabs2_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi*xi); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sumabs2_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sumabs2_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sumabs2_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double sumabs2_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum
_sumabs2_64_20_1<<<64,64>>>(n,x,y);
_sumabs2_64_20_2<<<1,64>>>(y,z);
hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _countnz_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _countnz_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi!=0); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_countnz_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _countnz_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_countnz_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
float countnz_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(float)); // final sum
_countnz_32_20_1<<<64,64>>>(n,x,y);
_countnz_32_20_2<<<1,64>>>(y,z);
hipMemcpy(&r,z,sizeof(float),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
__device__ void _countnz_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
__global__ void _countnz_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi!=0); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_countnz_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _countnz_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_countnz_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
double countnz_64_20(int n, double *x) {
double r;
static double *y;
static double *z;
if (y == NULL) hipMalloc(&y, 64*sizeof(double)); // sum for each block
if (z == NULL) hipMalloc(&z, sizeof(double)); // final sum
_countnz_64_20_1<<<64,64>>>(n,x,y);
_countnz_64_20_2<<<1,64>>>(y,z);
hipMemcpy(&r,z,sizeof(double),hipMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
__global__ void add(float *a, float *b, float *c)
{
int id = threadIdx.x;
c[id] = a[id] + b[id];
}
int main()
{
float a[] = {1., 2., 3.};
float b[] = {4., 5., 6.};
float c[3];
float *a_;
float *b_;
float *c_;
int size = 3 * sizeof(float);
cudaMalloc((void**) &a_, size);
cudaMalloc((void**) &b_, size);
cudaMalloc((void**) &c_, size);
cudaMemcpy(a_, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(b_, b, size, cudaMemcpyHostToDevice);
dim3 dimGrid(1, 1, 1);
dim3 dimBlock(3, 1, 1);
add <<< dimGrid, dimBlock >>> (a_, b_, c_);
cudaMemcpy(c, c_, size, cudaMemcpyDeviceToHost);
cudaFree(a_);
cudaFree(b_);
cudaFree(c_);
printf("%f %f %f\n", c[0], c[1], c[2]);
} | code for sm_80
Function : _Z3addPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*0090*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
__global__ void add(float *a, float *b, float *c)
{
int id = threadIdx.x;
c[id] = a[id] + b[id];
}
int main()
{
float a[] = {1., 2., 3.};
float b[] = {4., 5., 6.};
float c[3];
float *a_;
float *b_;
float *c_;
int size = 3 * sizeof(float);
cudaMalloc((void**) &a_, size);
cudaMalloc((void**) &b_, size);
cudaMalloc((void**) &c_, size);
cudaMemcpy(a_, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(b_, b, size, cudaMemcpyHostToDevice);
dim3 dimGrid(1, 1, 1);
dim3 dimBlock(3, 1, 1);
add <<< dimGrid, dimBlock >>> (a_, b_, c_);
cudaMemcpy(c, c_, size, cudaMemcpyDeviceToHost);
cudaFree(a_);
cudaFree(b_);
cudaFree(c_);
printf("%f %f %f\n", c[0], c[1], c[2]);
} | .file "tmpxft_00117571_00000000-6_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z3addPfS_S_PfS_S_
.type _Z26__device_stub__Z3addPfS_S_PfS_S_, @function
_Z26__device_stub__Z3addPfS_S_PfS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z26__device_stub__Z3addPfS_S_PfS_S_, .-_Z26__device_stub__Z3addPfS_S_PfS_S_
.globl _Z3addPfS_S_
.type _Z3addPfS_S_, @function
_Z3addPfS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z3addPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z3addPfS_S_, .-_Z3addPfS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC6:
.string "%f %f %f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $0x3f800000, 52(%rsp)
movl $0x40000000, 56(%rsp)
movl $0x40400000, 60(%rsp)
movl $0x40800000, 64(%rsp)
movl $0x40a00000, 68(%rsp)
movl $0x40c00000, 72(%rsp)
movq %rsp, %rdi
movl $12, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $12, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $12, %esi
call cudaMalloc@PLT
leaq 52(%rsp), %rsi
movl $1, %ecx
movl $12, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
leaq 64(%rsp), %rsi
movl $1, %ecx
movl $12, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $3, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 40(%rsp), %rdx
movl $1, %ecx
movq 28(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
leaq 76(%rsp), %rdi
movl $2, %ecx
movl $12, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
pxor %xmm0, %xmm0
cvtss2sd 76(%rsp), %xmm0
pxor %xmm2, %xmm2
cvtss2sd 84(%rsp), %xmm2
pxor %xmm1, %xmm1
cvtss2sd 80(%rsp), %xmm1
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $3, %eax
call __printf_chk@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L16
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z26__device_stub__Z3addPfS_S_PfS_S_
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z3addPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
// GPU kernel: element-wise addition of two float vectors, one thread per element.
// The accompanying host code launches exactly one thread per element (1 block x 3 threads).
__global__ void add(float *a, float *b, float *c)
{
int id = threadIdx.x; // element index handled by this thread
c[id] = a[id] + b[id];
}
// Host driver: adds two 3-element vectors on the GPU and prints the sums.
int main()
{
// Host-side input vectors and output buffer.
float a[] = {1., 2., 3.};
float b[] = {4., 5., 6.};
float c[3];
// Device-side copies of a, b and c.
float *a_;
float *b_;
float *c_;
int size = 3 * sizeof(float);
// NOTE(review): CUDA return codes are ignored throughout — best-effort demo.
cudaMalloc((void**) &a_, size);
cudaMalloc((void**) &b_, size);
cudaMalloc((void**) &c_, size);
// Upload the inputs to the device.
cudaMemcpy(a_, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(b_, b, size, cudaMemcpyHostToDevice);
// Single block of three threads: one thread per vector element.
dim3 dimGrid(1, 1, 1);
dim3 dimBlock(3, 1, 1);
add <<< dimGrid, dimBlock >>> (a_, b_, c_);
// Download the result back to the host.
cudaMemcpy(c, c_, size, cudaMemcpyDeviceToHost);
// Release device buffers.
cudaFree(a_);
cudaFree(b_);
cudaFree(c_);
printf("%f %f %f\n", c[0], c[1], c[2]);
} | #include <hip/hip_runtime.h>
#include <stdio.h>
// Element-wise vector addition: c[i] = a[i] + b[i], one thread per element.
// The host launches one block of exactly as many threads as elements, so
// threadIdx.x alone identifies the element.
__global__ void add(float *a, float *b, float *c)
{
    const int tid = threadIdx.x;        // element handled by this thread
    const float sum = a[tid] + b[tid];  // read both operands, then combine
    c[tid] = sum;                       // store the result
}
// Host driver: computes c = a + b for two 3-element vectors on the GPU via
// HIP and prints the three sums.
// NOTE(review): HIP return codes are ignored throughout — best-effort demo.
int main()
{
    // Host inputs and output buffer.
    float a[] = {1., 2., 3.};
    float b[] = {4., 5., 6.};
    float c[3];

    const int nbytes = 3 * sizeof(float);

    // Device buffers for the two inputs and the result.
    float *d_a;
    float *d_b;
    float *d_c;
    hipMalloc((void**) &d_a, nbytes);
    hipMalloc((void**) &d_b, nbytes);
    hipMalloc((void**) &d_c, nbytes);

    // Upload the inputs.
    hipMemcpy(d_a, a, nbytes, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, nbytes, hipMemcpyHostToDevice);

    // One block of three threads: one thread per vector element.
    dim3 grid(1, 1, 1);
    dim3 block(3, 1, 1);
    add <<< grid, block >>> (d_a, d_b, d_c);

    // Download the result, then release device memory.
    hipMemcpy(c, d_c, nbytes, hipMemcpyDeviceToHost);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    printf("%f %f %f\n", c[0], c[1], c[2]);
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
// GPU kernel: element-wise addition of two float vectors, one thread per element.
// The accompanying host code launches exactly one thread per element (1 block x 3 threads).
__global__ void add(float *a, float *b, float *c)
{
int id = threadIdx.x; // element index handled by this thread
c[id] = a[id] + b[id];
}
// Host driver: adds two 3-element vectors on the GPU via HIP and prints the sums.
int main()
{
// Host-side input vectors and output buffer.
float a[] = {1., 2., 3.};
float b[] = {4., 5., 6.};
float c[3];
// Device-side copies of a, b and c.
float *a_;
float *b_;
float *c_;
int size = 3 * sizeof(float);
// NOTE(review): HIP return codes are ignored throughout — best-effort demo.
hipMalloc((void**) &a_, size);
hipMalloc((void**) &b_, size);
hipMalloc((void**) &c_, size);
// Upload the inputs to the device.
hipMemcpy(a_, a, size, hipMemcpyHostToDevice);
hipMemcpy(b_, b, size, hipMemcpyHostToDevice);
// Single block of three threads: one thread per vector element.
dim3 dimGrid(1, 1, 1);
dim3 dimBlock(3, 1, 1);
add <<< dimGrid, dimBlock >>> (a_, b_, c_);
// Download the result back to the host.
hipMemcpy(c, c_, size, hipMemcpyDeviceToHost);
// Release device buffers.
hipFree(a_);
hipFree(b_);
hipFree(c_);
printf("%f %f %f\n", c[0], c[1], c[2]);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPfS_S_
.globl _Z3addPfS_S_
.p2align 8
.type _Z3addPfS_S_,@function
_Z3addPfS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPfS_S_, .Lfunc_end0-_Z3addPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z3addPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
// GPU kernel: element-wise addition of two float vectors, one thread per element.
// The accompanying host code launches exactly one thread per element (1 block x 3 threads).
__global__ void add(float *a, float *b, float *c)
{
int id = threadIdx.x; // element index handled by this thread
c[id] = a[id] + b[id];
}
// Host driver: adds two 3-element vectors on the GPU via HIP and prints the sums.
int main()
{
// Host-side input vectors and output buffer.
float a[] = {1., 2., 3.};
float b[] = {4., 5., 6.};
float c[3];
// Device-side copies of a, b and c.
float *a_;
float *b_;
float *c_;
int size = 3 * sizeof(float);
// NOTE(review): HIP return codes are ignored throughout — best-effort demo.
hipMalloc((void**) &a_, size);
hipMalloc((void**) &b_, size);
hipMalloc((void**) &c_, size);
// Upload the inputs to the device.
hipMemcpy(a_, a, size, hipMemcpyHostToDevice);
hipMemcpy(b_, b, size, hipMemcpyHostToDevice);
// Single block of three threads: one thread per vector element.
dim3 dimGrid(1, 1, 1);
dim3 dimBlock(3, 1, 1);
add <<< dimGrid, dimBlock >>> (a_, b_, c_);
// Download the result back to the host.
hipMemcpy(c, c_, size, hipMemcpyDeviceToHost);
// Release device buffers.
hipFree(a_);
hipFree(b_);
hipFree(c_);
printf("%f %f %f\n", c[0], c[1], c[2]);
} | .text
.file "cuda.hip"
.globl _Z18__device_stub__addPfS_S_ # -- Begin function _Z18__device_stub__addPfS_S_
.p2align 4, 0x90
.type _Z18__device_stub__addPfS_S_,@function
_Z18__device_stub__addPfS_S_: # @_Z18__device_stub__addPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addPfS_S_, .Lfunc_end0-_Z18__device_stub__addPfS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movabsq $4611686019492741120, %rax # imm = 0x400000003F800000
movq %rax, 140(%rsp)
movl $1077936128, 148(%rsp) # imm = 0x40400000
movabsq $4656722015783223296, %rax # imm = 0x40A0000040800000
movq %rax, 128(%rsp)
movl $1086324736, 136(%rsp) # imm = 0x40C00000
leaq 24(%rsp), %rdi
movl $12, %esi
callq hipMalloc
leaq 16(%rsp), %rdi
movl $12, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $12, %esi
callq hipMalloc
movq 24(%rsp), %rdi
leaq 140(%rsp), %rsi
movl $12, %edx
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
leaq 128(%rsp), %rsi
movl $12, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 2(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movq %rdx, 104(%rsp)
leaq 120(%rsp), %rax
movq %rax, 32(%rsp)
leaq 112(%rsp), %rax
movq %rax, 40(%rsp)
leaq 104(%rsp), %rax
movq %rax, 48(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 32(%rsp), %r9
movl $_Z3addPfS_S_, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 8(%rsp), %rsi
leaq 32(%rsp), %rdi
movl $12, %edx
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movss 32(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss 36(%rsp), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movss 40(%rsp), %xmm2 # xmm2 = mem[0],zero,zero,zero
cvtss2sd %xmm2, %xmm2
movl $.L.str, %edi
movb $3, %al
callq printf
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPfS_S_,@object # @_Z3addPfS_S_
.section .rodata,"a",@progbits
.globl _Z3addPfS_S_
.p2align 3, 0x0
_Z3addPfS_S_:
.quad _Z18__device_stub__addPfS_S_
.size _Z3addPfS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%f %f %f\n"
.size .L.str, 10
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPfS_S_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPfS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPfS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3addPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*0090*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPfS_S_
.globl _Z3addPfS_S_
.p2align 8
.type _Z3addPfS_S_,@function
_Z3addPfS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPfS_S_, .Lfunc_end0-_Z3addPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z3addPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00117571_00000000-6_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z3addPfS_S_PfS_S_
.type _Z26__device_stub__Z3addPfS_S_PfS_S_, @function
_Z26__device_stub__Z3addPfS_S_PfS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z26__device_stub__Z3addPfS_S_PfS_S_, .-_Z26__device_stub__Z3addPfS_S_PfS_S_
.globl _Z3addPfS_S_
.type _Z3addPfS_S_, @function
_Z3addPfS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z3addPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z3addPfS_S_, .-_Z3addPfS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC6:
.string "%f %f %f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $0x3f800000, 52(%rsp)
movl $0x40000000, 56(%rsp)
movl $0x40400000, 60(%rsp)
movl $0x40800000, 64(%rsp)
movl $0x40a00000, 68(%rsp)
movl $0x40c00000, 72(%rsp)
movq %rsp, %rdi
movl $12, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $12, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $12, %esi
call cudaMalloc@PLT
leaq 52(%rsp), %rsi
movl $1, %ecx
movl $12, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
leaq 64(%rsp), %rsi
movl $1, %ecx
movl $12, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $3, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 40(%rsp), %rdx
movl $1, %ecx
movq 28(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
leaq 76(%rsp), %rdi
movl $2, %ecx
movl $12, %edx
movq 16(%rsp), %rsi
call cudaMemcpy@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
pxor %xmm0, %xmm0
cvtss2sd 76(%rsp), %xmm0
pxor %xmm2, %xmm2
cvtss2sd 84(%rsp), %xmm2
pxor %xmm1, %xmm1
cvtss2sd 80(%rsp), %xmm1
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $3, %eax
call __printf_chk@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L16
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z26__device_stub__Z3addPfS_S_PfS_S_
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z3addPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cuda.hip"
.globl _Z18__device_stub__addPfS_S_ # -- Begin function _Z18__device_stub__addPfS_S_
.p2align 4, 0x90
.type _Z18__device_stub__addPfS_S_,@function
_Z18__device_stub__addPfS_S_: # @_Z18__device_stub__addPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addPfS_S_, .Lfunc_end0-_Z18__device_stub__addPfS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movabsq $4611686019492741120, %rax # imm = 0x400000003F800000
movq %rax, 140(%rsp)
movl $1077936128, 148(%rsp) # imm = 0x40400000
movabsq $4656722015783223296, %rax # imm = 0x40A0000040800000
movq %rax, 128(%rsp)
movl $1086324736, 136(%rsp) # imm = 0x40C00000
leaq 24(%rsp), %rdi
movl $12, %esi
callq hipMalloc
leaq 16(%rsp), %rdi
movl $12, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $12, %esi
callq hipMalloc
movq 24(%rsp), %rdi
leaq 140(%rsp), %rsi
movl $12, %edx
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
leaq 128(%rsp), %rsi
movl $12, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 2(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movq %rdx, 104(%rsp)
leaq 120(%rsp), %rax
movq %rax, 32(%rsp)
leaq 112(%rsp), %rax
movq %rax, 40(%rsp)
leaq 104(%rsp), %rax
movq %rax, 48(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 32(%rsp), %r9
movl $_Z3addPfS_S_, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 8(%rsp), %rsi
leaq 32(%rsp), %rdi
movl $12, %edx
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movss 32(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss 36(%rsp), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movss 40(%rsp), %xmm2 # xmm2 = mem[0],zero,zero,zero
cvtss2sd %xmm2, %xmm2
movl $.L.str, %edi
movb $3, %al
callq printf
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPfS_S_,@object # @_Z3addPfS_S_
.section .rodata,"a",@progbits
.globl _Z3addPfS_S_
.p2align 3, 0x0
_Z3addPfS_S_:
.quad _Z18__device_stub__addPfS_S_
.size _Z3addPfS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%f %f %f\n"
.size .L.str, 10
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPfS_S_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPfS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPfS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
static char* program_name;
// Usage
__global__ void jacobiOptimizedOnDevice(float* x_next, float* A, float* x_now, float* b, int Ni, int Nj)
{
// Optimization step 1: tiling
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < Ni)
{
float sigma = 0.0;
// Optimization step 2: store index in register
// Multiplication is not executed in every iteration.
int idx_Ai = idx*Nj;
// Tried to use prefetching, but then the result is terribly wrong and I don't know why..
/*
float curr_A = A[idx_Ai];
float nxt_A;
//printf("idx=%d\n",idx);
for (int j=0; j<Nj-1; j++)
{
if (idx != j)
nxt_A = A[idx_Ai + j + 1];
sigma += curr_A * x_now[j];
//sigma += A[idx_Ai + j] * x_now[j];
curr_A = nxt_A;
//printf("curr_A=%f\n",curr_A);
}
if (idx != Nj-1)
sigma += nxt_A * x_now[Nj-1];
x_next[idx] = (b[idx] - sigma) / A[idx_Ai + idx];
*/
for (int j=0; j<Nj; j++)
if (idx != j)
sigma += A[idx_Ai + j] * x_now[j];
// Tried to use loop-ennrolling, but also here this gives a wrong result..
/*
for (int j=0; j<Nj/4; j+=4)
{
if (idx != j)
{
sigma += A[idx_Ai + j] * x_now[j];
}
if (idx != j+1)
{
sigma += A[idx_Ai + j+1] * x_now[j+1];
}
if (idx != j+2)
{
sigma += A[idx_Ai + j+2] * x_now[j+2];
}
if (idx != j+3)
{
sigma += A[idx_Ai + j+3] * x_now[j+3];
}
}*/
x_next[idx] = (b[idx] - sigma) / A[idx_Ai + idx];
}
} | .file "tmpxft_001a5583_00000000-6_jacobiOptimizedOnDevice.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z51__device_stub__Z23jacobiOptimizedOnDevicePfS_S_S_iiPfS_S_S_ii
.type _Z51__device_stub__Z23jacobiOptimizedOnDevicePfS_S_S_iiPfS_S_S_ii, @function
_Z51__device_stub__Z23jacobiOptimizedOnDevicePfS_S_S_iiPfS_S_S_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z23jacobiOptimizedOnDevicePfS_S_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z51__device_stub__Z23jacobiOptimizedOnDevicePfS_S_S_iiPfS_S_S_ii, .-_Z51__device_stub__Z23jacobiOptimizedOnDevicePfS_S_S_iiPfS_S_S_ii
.globl _Z23jacobiOptimizedOnDevicePfS_S_S_ii
.type _Z23jacobiOptimizedOnDevicePfS_S_S_ii, @function
_Z23jacobiOptimizedOnDevicePfS_S_S_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z51__device_stub__Z23jacobiOptimizedOnDevicePfS_S_S_iiPfS_S_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z23jacobiOptimizedOnDevicePfS_S_S_ii, .-_Z23jacobiOptimizedOnDevicePfS_S_S_ii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z23jacobiOptimizedOnDevicePfS_S_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z23jacobiOptimizedOnDevicePfS_S_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
static char* program_name;
// Usage
__global__ void jacobiOptimizedOnDevice(float* x_next, float* A, float* x_now, float* b, int Ni, int Nj)
{
// Optimization step 1: tiling
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < Ni)
{
float sigma = 0.0;
// Optimization step 2: store index in register
// Multiplication is not executed in every iteration.
int idx_Ai = idx*Nj;
// Tried to use prefetching, but then the result is terribly wrong and I don't know why..
/*
float curr_A = A[idx_Ai];
float nxt_A;
//printf("idx=%d\n",idx);
for (int j=0; j<Nj-1; j++)
{
if (idx != j)
nxt_A = A[idx_Ai + j + 1];
sigma += curr_A * x_now[j];
//sigma += A[idx_Ai + j] * x_now[j];
curr_A = nxt_A;
//printf("curr_A=%f\n",curr_A);
}
if (idx != Nj-1)
sigma += nxt_A * x_now[Nj-1];
x_next[idx] = (b[idx] - sigma) / A[idx_Ai + idx];
*/
for (int j=0; j<Nj; j++)
if (idx != j)
sigma += A[idx_Ai + j] * x_now[j];
// Tried to use loop-ennrolling, but also here this gives a wrong result..
/*
for (int j=0; j<Nj/4; j+=4)
{
if (idx != j)
{
sigma += A[idx_Ai + j] * x_now[j];
}
if (idx != j+1)
{
sigma += A[idx_Ai + j+1] * x_now[j+1];
}
if (idx != j+2)
{
sigma += A[idx_Ai + j+2] * x_now[j+2];
}
if (idx != j+3)
{
sigma += A[idx_Ai + j+3] * x_now[j+3];
}
}*/
x_next[idx] = (b[idx] - sigma) / A[idx_Ai + idx];
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
static char* program_name;
// Usage
__global__ void jacobiOptimizedOnDevice(float* x_next, float* A, float* x_now, float* b, int Ni, int Nj)
{
// Optimization step 1: tiling
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < Ni)
{
float sigma = 0.0;
// Optimization step 2: store index in register
// Multiplication is not executed in every iteration.
int idx_Ai = idx*Nj;
// Tried to use prefetching, but then the result is terribly wrong and I don't know why..
/*
float curr_A = A[idx_Ai];
float nxt_A;
//printf("idx=%d\n",idx);
for (int j=0; j<Nj-1; j++)
{
if (idx != j)
nxt_A = A[idx_Ai + j + 1];
sigma += curr_A * x_now[j];
//sigma += A[idx_Ai + j] * x_now[j];
curr_A = nxt_A;
//printf("curr_A=%f\n",curr_A);
}
if (idx != Nj-1)
sigma += nxt_A * x_now[Nj-1];
x_next[idx] = (b[idx] - sigma) / A[idx_Ai + idx];
*/
for (int j=0; j<Nj; j++)
if (idx != j)
sigma += A[idx_Ai + j] * x_now[j];
// Tried to use loop-ennrolling, but also here this gives a wrong result..
/*
for (int j=0; j<Nj/4; j+=4)
{
if (idx != j)
{
sigma += A[idx_Ai + j] * x_now[j];
}
if (idx != j+1)
{
sigma += A[idx_Ai + j+1] * x_now[j+1];
}
if (idx != j+2)
{
sigma += A[idx_Ai + j+2] * x_now[j+2];
}
if (idx != j+3)
{
sigma += A[idx_Ai + j+3] * x_now[j+3];
}
}*/
x_next[idx] = (b[idx] - sigma) / A[idx_Ai + idx];
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
static char* program_name;
// Usage
__global__ void jacobiOptimizedOnDevice(float* x_next, float* A, float* x_now, float* b, int Ni, int Nj)
{
// Optimization step 1: tiling
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < Ni)
{
float sigma = 0.0;
// Optimization step 2: store index in register
// Multiplication is not executed in every iteration.
int idx_Ai = idx*Nj;
// Tried to use prefetching, but then the result is terribly wrong and I don't know why..
/*
float curr_A = A[idx_Ai];
float nxt_A;
//printf("idx=%d\n",idx);
for (int j=0; j<Nj-1; j++)
{
if (idx != j)
nxt_A = A[idx_Ai + j + 1];
sigma += curr_A * x_now[j];
//sigma += A[idx_Ai + j] * x_now[j];
curr_A = nxt_A;
//printf("curr_A=%f\n",curr_A);
}
if (idx != Nj-1)
sigma += nxt_A * x_now[Nj-1];
x_next[idx] = (b[idx] - sigma) / A[idx_Ai + idx];
*/
for (int j=0; j<Nj; j++)
if (idx != j)
sigma += A[idx_Ai + j] * x_now[j];
// Tried to use loop-ennrolling, but also here this gives a wrong result..
/*
for (int j=0; j<Nj/4; j+=4)
{
if (idx != j)
{
sigma += A[idx_Ai + j] * x_now[j];
}
if (idx != j+1)
{
sigma += A[idx_Ai + j+1] * x_now[j+1];
}
if (idx != j+2)
{
sigma += A[idx_Ai + j+2] * x_now[j+2];
}
if (idx != j+3)
{
sigma += A[idx_Ai + j+3] * x_now[j+3];
}
}*/
x_next[idx] = (b[idx] - sigma) / A[idx_Ai + idx];
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z23jacobiOptimizedOnDevicePfS_S_S_ii
.globl _Z23jacobiOptimizedOnDevicePfS_S_S_ii
.p2align 8
.type _Z23jacobiOptimizedOnDevicePfS_S_S_ii,@function
_Z23jacobiOptimizedOnDevicePfS_S_S_ii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x34
s_load_b32 s3, s[0:1], 0x20
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_8
s_clause 0x1
s_load_b32 s6, s[0:1], 0x24
s_load_b64 s[2:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
v_mul_lo_u32 v4, v1, s6
s_cmp_lt_i32 s6, 1
s_cbranch_scc1 .LBB0_6
s_load_b64 s[4:5], s[0:1], 0x10
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v5, v1
s_delay_alu instid0(VALU_DEP_2)
v_mov_b32_e32 v2, v4
s_branch .LBB0_4
.p2align 6
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s7
s_add_i32 s6, s6, -1
v_add_nc_u32_e32 v5, -1, v5
v_add_nc_u32_e32 v2, 1, v2
s_waitcnt lgkmcnt(0)
s_add_u32 s4, s4, 4
s_addc_u32 s5, s5, 0
s_cmp_eq_u32 s6, 0
s_cbranch_scc1 .LBB0_7
.LBB0_4:
s_mov_b32 s7, exec_lo
s_delay_alu instid0(VALU_DEP_2)
v_cmpx_ne_u32_e32 0, v5
s_cbranch_execz .LBB0_3
v_ashrrev_i32_e32 v3, 31, v2
s_waitcnt lgkmcnt(0)
s_load_b32 s8, s[4:5], 0x0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[6:7], 2, v[2:3]
v_add_co_u32 v6, vcc_lo, s2, v6
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v7, vcc_lo, s3, v7, vcc_lo
global_load_b32 v3, v[6:7], off
s_waitcnt vmcnt(0) lgkmcnt(0)
v_fmac_f32_e32 v0, s8, v3
s_branch .LBB0_3
.LBB0_6:
v_mov_b32_e32 v0, 0
.LBB0_7:
s_load_b64 s[4:5], s[0:1], 0x18
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v3, v4, v1
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x0
v_ashrrev_i32_e32 v4, 31, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[1:2], 2, v[1:2]
v_lshlrev_b64 v[3:4], 2, v[3:4]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v5, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v2, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v3, vcc_lo, s2, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
global_load_b32 v5, v[5:6], off
global_load_b32 v3, v[3:4], off
s_waitcnt vmcnt(1)
v_sub_f32_e32 v0, v5, v0
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_scale_f32 v4, null, v3, v3, v0
v_div_scale_f32 v7, vcc_lo, v0, v3, v0
v_rcp_f32_e32 v5, v4
s_waitcnt_depctr 0xfff
v_fma_f32 v6, -v4, v5, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v5, v6, v5
v_mul_f32_e32 v6, v7, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v8, -v4, v6, v7
v_fmac_f32_e32 v6, v8, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v4, -v4, v6, v7
v_div_fmas_f32 v4, v4, v5, v6
s_delay_alu instid0(VALU_DEP_1)
v_div_fixup_f32 v3, v4, v3, v0
v_add_co_u32 v0, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[0:1], v3, off
.LBB0_8:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23jacobiOptimizedOnDevicePfS_S_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z23jacobiOptimizedOnDevicePfS_S_S_ii, .Lfunc_end0-_Z23jacobiOptimizedOnDevicePfS_S_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23jacobiOptimizedOnDevicePfS_S_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23jacobiOptimizedOnDevicePfS_S_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
static char* program_name;
// Usage
__global__ void jacobiOptimizedOnDevice(float* x_next, float* A, float* x_now, float* b, int Ni, int Nj)
{
// Optimization step 1: tiling
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < Ni)
{
float sigma = 0.0;
// Optimization step 2: store index in register
// Multiplication is not executed in every iteration.
int idx_Ai = idx*Nj;
// Tried to use prefetching, but then the result is terribly wrong and I don't know why..
/*
float curr_A = A[idx_Ai];
float nxt_A;
//printf("idx=%d\n",idx);
for (int j=0; j<Nj-1; j++)
{
if (idx != j)
nxt_A = A[idx_Ai + j + 1];
sigma += curr_A * x_now[j];
//sigma += A[idx_Ai + j] * x_now[j];
curr_A = nxt_A;
//printf("curr_A=%f\n",curr_A);
}
if (idx != Nj-1)
sigma += nxt_A * x_now[Nj-1];
x_next[idx] = (b[idx] - sigma) / A[idx_Ai + idx];
*/
for (int j=0; j<Nj; j++)
if (idx != j)
sigma += A[idx_Ai + j] * x_now[j];
// Tried to use loop-ennrolling, but also here this gives a wrong result..
/*
for (int j=0; j<Nj/4; j+=4)
{
if (idx != j)
{
sigma += A[idx_Ai + j] * x_now[j];
}
if (idx != j+1)
{
sigma += A[idx_Ai + j+1] * x_now[j+1];
}
if (idx != j+2)
{
sigma += A[idx_Ai + j+2] * x_now[j+2];
}
if (idx != j+3)
{
sigma += A[idx_Ai + j+3] * x_now[j+3];
}
}*/
x_next[idx] = (b[idx] - sigma) / A[idx_Ai + idx];
}
} | .text
.file "jacobiOptimizedOnDevice.hip"
.globl _Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii # -- Begin function _Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii
.p2align 4, 0x90
.type _Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii,@function
_Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii: # @_Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z23jacobiOptimizedOnDevicePfS_S_S_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii, .Lfunc_end0-_Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23jacobiOptimizedOnDevicePfS_S_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z23jacobiOptimizedOnDevicePfS_S_S_ii,@object # @_Z23jacobiOptimizedOnDevicePfS_S_S_ii
.section .rodata,"a",@progbits
.globl _Z23jacobiOptimizedOnDevicePfS_S_S_ii
.p2align 3, 0x0
_Z23jacobiOptimizedOnDevicePfS_S_S_ii:
.quad _Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii
.size _Z23jacobiOptimizedOnDevicePfS_S_S_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z23jacobiOptimizedOnDevicePfS_S_S_ii"
.size .L__unnamed_1, 38
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z23jacobiOptimizedOnDevicePfS_S_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001a5583_00000000-6_jacobiOptimizedOnDevice.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z51__device_stub__Z23jacobiOptimizedOnDevicePfS_S_S_iiPfS_S_S_ii
.type _Z51__device_stub__Z23jacobiOptimizedOnDevicePfS_S_S_iiPfS_S_S_ii, @function
_Z51__device_stub__Z23jacobiOptimizedOnDevicePfS_S_S_iiPfS_S_S_ii:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z23jacobiOptimizedOnDevicePfS_S_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z51__device_stub__Z23jacobiOptimizedOnDevicePfS_S_S_iiPfS_S_S_ii, .-_Z51__device_stub__Z23jacobiOptimizedOnDevicePfS_S_S_iiPfS_S_S_ii
.globl _Z23jacobiOptimizedOnDevicePfS_S_S_ii
.type _Z23jacobiOptimizedOnDevicePfS_S_S_ii, @function
_Z23jacobiOptimizedOnDevicePfS_S_S_ii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z51__device_stub__Z23jacobiOptimizedOnDevicePfS_S_S_iiPfS_S_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z23jacobiOptimizedOnDevicePfS_S_S_ii, .-_Z23jacobiOptimizedOnDevicePfS_S_S_ii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z23jacobiOptimizedOnDevicePfS_S_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z23jacobiOptimizedOnDevicePfS_S_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "jacobiOptimizedOnDevice.hip"
.globl _Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii # -- Begin function _Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii
.p2align 4, 0x90
.type _Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii,@function
_Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii: # @_Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z23jacobiOptimizedOnDevicePfS_S_S_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii, .Lfunc_end0-_Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23jacobiOptimizedOnDevicePfS_S_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z23jacobiOptimizedOnDevicePfS_S_S_ii,@object # @_Z23jacobiOptimizedOnDevicePfS_S_S_ii
.section .rodata,"a",@progbits
.globl _Z23jacobiOptimizedOnDevicePfS_S_S_ii
.p2align 3, 0x0
_Z23jacobiOptimizedOnDevicePfS_S_S_ii:
.quad _Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii
.size _Z23jacobiOptimizedOnDevicePfS_S_S_ii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z23jacobiOptimizedOnDevicePfS_S_S_ii"
.size .L__unnamed_1, 38
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z38__device_stub__jacobiOptimizedOnDevicePfS_S_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z23jacobiOptimizedOnDevicePfS_S_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
* =====================================================================================
*
* Filename: cudatestfunc.cu
*
* Description:
*
* Version: 1.0
* Created: 2016年05月17日 00時26分44秒
* Revision: none
* Compiler: gcc
*
* Author: YOUR NAME (),
* Organization:
*
* =====================================================================================
*/
#include <cuda_runtime.h>
__global__ void test0(float* d_a)
{
int i = threadIdx.x;
d_a[i] *= 2.0f;
}
void cudatestfunc(float* h_a, int n)
{
float* d_a;
cudaMalloc((void**)&d_a, n * sizeof(float));
cudaMemcpy(d_a, h_a, n * sizeof(float), cudaMemcpyHostToDevice);
test0<<<1,n>>>(d_a);
cudaMemcpy(h_a, d_a, n * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_a);
}
/*
__host__ __device__ int getIndex(float x, float interval)
{
return (int)floorf(x / interval);
}
__host__ __device__ int* getIndex(const float* const x, const float* const interval, int* const xi)
{
xi[0] = getIndex(x[0], interval[0]);
xi[1] = getIndex(x[1], interval[1]);
xi[2] = getIndex(x[2], interval[2]);
xi[3] = getIndex(x[3], interval[3]);
return xi;
}
__host__ __device__ int sign(int x)
{
return (x > 0) ? 1 : ( (x < 0) ? -1 : 0 );
}
__host__ __device__ int* getDirection(const int* const from, const int* const to, int* const direction)
{
direction[0] = sign(to[0] - from[0]);
direction[1] = sign(to[1] - from[1]);
direction[2] = sign(to[2] - from[2]);
direction[3] = sign(to[3] - from[3]);
return direction;
}
__host__ __device__ bool equals(const int* const x0, const int* const x1)
{
bool ret = true;
ret = (x0[0] == x1[0]) ? ret : false;
ret = (x0[1] == x1[1]) ? ret : false;
ret = (x0[2] == x1[2]) ? ret : false;
ret = (x0[3] == x1[3]) ? ret : false;
return ret;
}
//交点を算出する
__host__ __device__ float* getCrossPoint
(
const float* const from,//線分の始点
const float* const to,//線分の終点
const int* const p_i,//セルの番号
const float* const interval,//セルの幅
const int* const dir_i,//線分の方向
int i,//交点を求める方向
float* const cross//算出する交点
)
{
//交点を求める平面の座標
float x = interval[i] * (p_i[i] + ((dir_i[i] + 1) / 2));
//交点の線分のパラメータ
float s = (x - from[i]) / (to[i] - from[i]);
//交点の算出
cross[0] = (to[0] - from[0]) * s + from[0];
cross[1] = (to[1] - from[1]) * s + from[1];
cross[2] = (to[2] - from[2]) * s + from[2];
cross[3] = (to[3] - from[3]) * s + from[3];
//i成分については誤差が出ないようにもともと求めていたxを使用する
cross[i] = x;
//交点を返却
return cross;
}
//交点が指定したセルの指定した方向の境界にいるかどうか判定する
__host__ __device__ bool atBound
(
const float* const cross,//交点
const int* const p_i,//セル番号
const float* const interval,//セルの幅
int i//方向
)
{
bool ret = true;
for(int j = 0; j < 4;j++)
{
if(j != i)
{
if((cross[i] < interval[j] * p_i[j]) || (interval[j] * (p_i[j] + 1) <= cross[i]))
{
ret = false;
}
}
else
{
if((cross[i] < interval[j] * (p_i[j] - 1)) || (interval[j] * (p_i[j] + 2) <= cross[i]))
{
ret = false;
}
}
}
return ret;
}
//線分が通過するセル境界に対し回数をカウントアップする
__host__ __device__ void countCrossing
(
const float* const from,//線分の始点
const float* const to,//線分の終点
int* const c,//カウンタ
int n,//カウンタの個数
const float* const interval//セルの幅
)
{
//fromの属するセル番号を取得
int from_i[4];
getIndex(from, interval, from_i);
//toの属するセル番号を取得
int to_i[4];
getIndex(to, interval, to_i);
//線分の各方向の向き
int direction_i[4];
getDirection(from_i, to_i, direction_i);
//どのセルにも進まない線分かチェック
if((direction_i[0] == 0) && (direction_i[1] == 0) && (direction_i[2] == 0) && (direction_i[3] == 0))
{
//同じセルにとどまっている線分なので集計なしで終了
return;
}
//ループ用のセル番号
int p_i[4] = {from_i[0], from_i[1], from_i[2], from_i[3]};
//fromからtoまでに通過するセルを求める
do
{
//次に通過するセル
int next_i[4] = {p_i[0], p_i[1], p_i[2], p_i[3]};
//p_iの次に通過するセルnext_iを求める
for(int i = 0; i < 4; i++)
{
//next_iが第i方向かどうかチェック
if(direction_i[i] == 0)
{
//この方向には進まないので別の方向をチェック
continue;
}
//toの位置に達していたらこれ以上は進まないので別の方向をチェック
if(p_i[i] == to_i[i])
{
//この方向には進まないので別の方向をチェック
continue;
}
//i方向と垂直な平面との交点を求める
float cross[4];
getCrossPoint(from, to, p_i, interval, direction_i, i, cross);
//現在のセルp_iの境界上の点であればnext_iを確定してbreakでforを抜ける
if(atBound(cross, p_i, interval, i))
{
next_i[i] += direction_i[i];
break;
}
}
//カウントアップを行う
//対象のカウンタのインデックス
int c_i = 0;//TODO カウンタのインデックスを算出する関数を作成する
for(int i = 0; i < 4; i++)
{
int c_dir_i = next_i[i] - p_i[i];
if(c_dir_i != 0)
{
c_i += 2 * i + ((c_dir_i > 0) ? 0 : 1);//向きに応じたカウンタのインデックス
break;
}
}
atomicAdd(&(c[c_i]),1);//カウンタにatomicに加算する <- __host__向けには使えない!!//TODO 修正する!
//p_iをnext_iに更新
p_i[0] = next_i[0];
p_i[1] = next_i[1];
p_i[2] = next_i[2];
p_i[3] = next_i[3];
}
while(!equals(p_i, to_i));//toのセルに到達したら終了
}
*/ | code for sm_80
Function : _Z5test0Pf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x001fca00078e0203 */
/*0050*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*0060*/ FADD R5, R0, R0 ; /* 0x0000000000057221 */
/* 0x004fca0000000000 */
/*0070*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0080*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0090*/ BRA 0x90; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
* =====================================================================================
*
* Filename: cudatestfunc.cu
*
* Description:
*
* Version: 1.0
* Created: 2016年05月17日 00時26分44秒
* Revision: none
* Compiler: gcc
*
* Author: YOUR NAME (),
* Organization:
*
* =====================================================================================
*/
#include <cuda_runtime.h>
// Kernel: doubles each element of d_a in place.
// One thread per element (thread t handles d_a[t]); the launch must
// supply at least as many threads as there are elements.
__global__ void test0(float* d_a)
{
int i = threadIdx.x;
d_a[i] *= 2.0f;
}
// Host wrapper: copies h_a (n floats) to the device, doubles every
// element with a single n-thread block of test0, copies the result
// back into h_a, and frees the device buffer.
// NOTE(review): no error checking on the CUDA calls, and n is assumed
// to fit in one block (<= max threads per block) — confirm callers.
void cudatestfunc(float* h_a, int n)
{
float* d_a;
cudaMalloc((void**)&d_a, n * sizeof(float)); // device buffer of n floats
cudaMemcpy(d_a, h_a, n * sizeof(float), cudaMemcpyHostToDevice);
test0<<<1,n>>>(d_a); // 1 block, n threads
cudaMemcpy(h_a, d_a, n * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_a);
}
/*
__host__ __device__ int getIndex(float x, float interval)
{
return (int)floorf(x / interval);
}
__host__ __device__ int* getIndex(const float* const x, const float* const interval, int* const xi)
{
xi[0] = getIndex(x[0], interval[0]);
xi[1] = getIndex(x[1], interval[1]);
xi[2] = getIndex(x[2], interval[2]);
xi[3] = getIndex(x[3], interval[3]);
return xi;
}
__host__ __device__ int sign(int x)
{
return (x > 0) ? 1 : ( (x < 0) ? -1 : 0 );
}
__host__ __device__ int* getDirection(const int* const from, const int* const to, int* const direction)
{
direction[0] = sign(to[0] - from[0]);
direction[1] = sign(to[1] - from[1]);
direction[2] = sign(to[2] - from[2]);
direction[3] = sign(to[3] - from[3]);
return direction;
}
__host__ __device__ bool equals(const int* const x0, const int* const x1)
{
bool ret = true;
ret = (x0[0] == x1[0]) ? ret : false;
ret = (x0[1] == x1[1]) ? ret : false;
ret = (x0[2] == x1[2]) ? ret : false;
ret = (x0[3] == x1[3]) ? ret : false;
return ret;
}
//交点を算出する
__host__ __device__ float* getCrossPoint
(
const float* const from,//線分の始点
const float* const to,//線分の終点
const int* const p_i,//セルの番号
const float* const interval,//セルの幅
const int* const dir_i,//線分の方向
int i,//交点を求める方向
float* const cross//算出する交点
)
{
//交点を求める平面の座標
float x = interval[i] * (p_i[i] + ((dir_i[i] + 1) / 2));
//交点の線分のパラメータ
float s = (x - from[i]) / (to[i] - from[i]);
//交点の算出
cross[0] = (to[0] - from[0]) * s + from[0];
cross[1] = (to[1] - from[1]) * s + from[1];
cross[2] = (to[2] - from[2]) * s + from[2];
cross[3] = (to[3] - from[3]) * s + from[3];
//i成分については誤差が出ないようにもともと求めていたxを使用する
cross[i] = x;
//交点を返却
return cross;
}
//交点が指定したセルの指定した方向の境界にいるかどうか判定する
__host__ __device__ bool atBound
(
const float* const cross,//交点
const int* const p_i,//セル番号
const float* const interval,//セルの幅
int i//方向
)
{
bool ret = true;
for(int j = 0; j < 4;j++)
{
if(j != i)
{
if((cross[i] < interval[j] * p_i[j]) || (interval[j] * (p_i[j] + 1) <= cross[i]))
{
ret = false;
}
}
else
{
if((cross[i] < interval[j] * (p_i[j] - 1)) || (interval[j] * (p_i[j] + 2) <= cross[i]))
{
ret = false;
}
}
}
return ret;
}
//線分が通過するセル境界に対し回数をカウントアップする
__host__ __device__ void countCrossing
(
const float* const from,//線分の始点
const float* const to,//線分の終点
int* const c,//カウンタ
int n,//カウンタの個数
const float* const interval//セルの幅
)
{
//fromの属するセル番号を取得
int from_i[4];
getIndex(from, interval, from_i);
//toの属するセル番号を取得
int to_i[4];
getIndex(to, interval, to_i);
//線分の各方向の向き
int direction_i[4];
getDirection(from_i, to_i, direction_i);
//どのセルにも進まない線分かチェック
if((direction_i[0] == 0) && (direction_i[1] == 0) && (direction_i[2] == 0) && (direction_i[3] == 0))
{
//同じセルにとどまっている線分なので集計なしで終了
return;
}
//ループ用のセル番号
int p_i[4] = {from_i[0], from_i[1], from_i[2], from_i[3]};
//fromからtoまでに通過するセルを求める
do
{
//次に通過するセル
int next_i[4] = {p_i[0], p_i[1], p_i[2], p_i[3]};
//p_iの次に通過するセルnext_iを求める
for(int i = 0; i < 4; i++)
{
//next_iが第i方向かどうかチェック
if(direction_i[i] == 0)
{
//この方向には進まないので別の方向をチェック
continue;
}
//toの位置に達していたらこれ以上は進まないので別の方向をチェック
if(p_i[i] == to_i[i])
{
//この方向には進まないので別の方向をチェック
continue;
}
//i方向と垂直な平面との交点を求める
float cross[4];
getCrossPoint(from, to, p_i, interval, direction_i, i, cross);
//現在のセルp_iの境界上の点であればnext_iを確定してbreakでforを抜ける
if(atBound(cross, p_i, interval, i))
{
next_i[i] += direction_i[i];
break;
}
}
//カウントアップを行う
//対象のカウンタのインデックス
int c_i = 0;//TODO カウンタのインデックスを算出する関数を作成する
for(int i = 0; i < 4; i++)
{
int c_dir_i = next_i[i] - p_i[i];
if(c_dir_i != 0)
{
c_i += 2 * i + ((c_dir_i > 0) ? 0 : 1);//向きに応じたカウンタのインデックス
break;
}
}
atomicAdd(&(c[c_i]),1);//カウンタにatomicに加算する <- __host__向けには使えない!!//TODO 修正する!
//p_iをnext_iに更新
p_i[0] = next_i[0];
p_i[1] = next_i[1];
p_i[2] = next_i[2];
p_i[3] = next_i[3];
}
while(!equals(p_i, to_i));//toのセルに到達したら終了
}
*/ | .file "tmpxft_0007de44_00000000-6_cudatestfunc.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z24__device_stub__Z5test0PfPf
.type _Z24__device_stub__Z5test0PfPf, @function
_Z24__device_stub__Z5test0PfPf:
.LFB2052:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z5test0Pf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z24__device_stub__Z5test0PfPf, .-_Z24__device_stub__Z5test0PfPf
.globl _Z5test0Pf
.type _Z5test0Pf, @function
_Z5test0Pf:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z24__device_stub__Z5test0PfPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z5test0Pf, .-_Z5test0Pf
.globl _Z12cudatestfuncPfi
.type _Z12cudatestfuncPfi, @function
_Z12cudatestfuncPfi:
.LFB2027:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $48, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %rbp
movl %esi, %r12d
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movslq %esi, %rbx
salq $2, %rbx
leaq 8(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %r12d, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
movl $2, %ecx
movq %rbx, %rdx
movq 8(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movq 8(%rsp), %rdi
call _Z24__device_stub__Z5test0PfPf
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2027:
.size _Z12cudatestfuncPfi, .-_Z12cudatestfuncPfi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z5test0Pf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z5test0Pf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
* =====================================================================================
*
* Filename: cudatestfunc.cu
*
* Description:
*
* Version: 1.0
* Created: 2016年05月17日 00時26分44秒
* Revision: none
* Compiler: gcc
*
* Author: YOUR NAME (),
* Organization:
*
* =====================================================================================
*/
#include <cuda_runtime.h>
// Kernel: doubles each element of d_a in place.
// One thread per element (thread t handles d_a[t]); the launch must
// supply at least as many threads as there are elements.
__global__ void test0(float* d_a)
{
int i = threadIdx.x;
d_a[i] *= 2.0f;
}
// Host wrapper: copies h_a (n floats) to the device, doubles every
// element with a single n-thread block of test0, copies the result
// back into h_a, and frees the device buffer.
// NOTE(review): no error checking on the CUDA calls, and n is assumed
// to fit in one block (<= max threads per block) — confirm callers.
void cudatestfunc(float* h_a, int n)
{
float* d_a;
cudaMalloc((void**)&d_a, n * sizeof(float)); // device buffer of n floats
cudaMemcpy(d_a, h_a, n * sizeof(float), cudaMemcpyHostToDevice);
test0<<<1,n>>>(d_a); // 1 block, n threads
cudaMemcpy(h_a, d_a, n * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_a);
}
/*
__host__ __device__ int getIndex(float x, float interval)
{
return (int)floorf(x / interval);
}
__host__ __device__ int* getIndex(const float* const x, const float* const interval, int* const xi)
{
xi[0] = getIndex(x[0], interval[0]);
xi[1] = getIndex(x[1], interval[1]);
xi[2] = getIndex(x[2], interval[2]);
xi[3] = getIndex(x[3], interval[3]);
return xi;
}
__host__ __device__ int sign(int x)
{
return (x > 0) ? 1 : ( (x < 0) ? -1 : 0 );
}
__host__ __device__ int* getDirection(const int* const from, const int* const to, int* const direction)
{
direction[0] = sign(to[0] - from[0]);
direction[1] = sign(to[1] - from[1]);
direction[2] = sign(to[2] - from[2]);
direction[3] = sign(to[3] - from[3]);
return direction;
}
__host__ __device__ bool equals(const int* const x0, const int* const x1)
{
bool ret = true;
ret = (x0[0] == x1[0]) ? ret : false;
ret = (x0[1] == x1[1]) ? ret : false;
ret = (x0[2] == x1[2]) ? ret : false;
ret = (x0[3] == x1[3]) ? ret : false;
return ret;
}
//交点を算出する
__host__ __device__ float* getCrossPoint
(
const float* const from,//線分の始点
const float* const to,//線分の終点
const int* const p_i,//セルの番号
const float* const interval,//セルの幅
const int* const dir_i,//線分の方向
int i,//交点を求める方向
float* const cross//算出する交点
)
{
//交点を求める平面の座標
float x = interval[i] * (p_i[i] + ((dir_i[i] + 1) / 2));
//交点の線分のパラメータ
float s = (x - from[i]) / (to[i] - from[i]);
//交点の算出
cross[0] = (to[0] - from[0]) * s + from[0];
cross[1] = (to[1] - from[1]) * s + from[1];
cross[2] = (to[2] - from[2]) * s + from[2];
cross[3] = (to[3] - from[3]) * s + from[3];
//i成分については誤差が出ないようにもともと求めていたxを使用する
cross[i] = x;
//交点を返却
return cross;
}
//交点が指定したセルの指定した方向の境界にいるかどうか判定する
__host__ __device__ bool atBound
(
const float* const cross,//交点
const int* const p_i,//セル番号
const float* const interval,//セルの幅
int i//方向
)
{
bool ret = true;
for(int j = 0; j < 4;j++)
{
if(j != i)
{
if((cross[i] < interval[j] * p_i[j]) || (interval[j] * (p_i[j] + 1) <= cross[i]))
{
ret = false;
}
}
else
{
if((cross[i] < interval[j] * (p_i[j] - 1)) || (interval[j] * (p_i[j] + 2) <= cross[i]))
{
ret = false;
}
}
}
return ret;
}
//線分が通過するセル境界に対し回数をカウントアップする
__host__ __device__ void countCrossing
(
const float* const from,//線分の始点
const float* const to,//線分の終点
int* const c,//カウンタ
int n,//カウンタの個数
const float* const interval//セルの幅
)
{
//fromの属するセル番号を取得
int from_i[4];
getIndex(from, interval, from_i);
//toの属するセル番号を取得
int to_i[4];
getIndex(to, interval, to_i);
//線分の各方向の向き
int direction_i[4];
getDirection(from_i, to_i, direction_i);
//どのセルにも進まない線分かチェック
if((direction_i[0] == 0) && (direction_i[1] == 0) && (direction_i[2] == 0) && (direction_i[3] == 0))
{
//同じセルにとどまっている線分なので集計なしで終了
return;
}
//ループ用のセル番号
int p_i[4] = {from_i[0], from_i[1], from_i[2], from_i[3]};
//fromからtoまでに通過するセルを求める
do
{
//次に通過するセル
int next_i[4] = {p_i[0], p_i[1], p_i[2], p_i[3]};
//p_iの次に通過するセルnext_iを求める
for(int i = 0; i < 4; i++)
{
//next_iが第i方向かどうかチェック
if(direction_i[i] == 0)
{
//この方向には進まないので別の方向をチェック
continue;
}
//toの位置に達していたらこれ以上は進まないので別の方向をチェック
if(p_i[i] == to_i[i])
{
//この方向には進まないので別の方向をチェック
continue;
}
//i方向と垂直な平面との交点を求める
float cross[4];
getCrossPoint(from, to, p_i, interval, direction_i, i, cross);
//現在のセルp_iの境界上の点であればnext_iを確定してbreakでforを抜ける
if(atBound(cross, p_i, interval, i))
{
next_i[i] += direction_i[i];
break;
}
}
//カウントアップを行う
//対象のカウンタのインデックス
int c_i = 0;//TODO カウンタのインデックスを算出する関数を作成する
for(int i = 0; i < 4; i++)
{
int c_dir_i = next_i[i] - p_i[i];
if(c_dir_i != 0)
{
c_i += 2 * i + ((c_dir_i > 0) ? 0 : 1);//向きに応じたカウンタのインデックス
break;
}
}
atomicAdd(&(c[c_i]),1);//カウンタにatomicに加算する <- __host__向けには使えない!!//TODO 修正する!
//p_iをnext_iに更新
p_i[0] = next_i[0];
p_i[1] = next_i[1];
p_i[2] = next_i[2];
p_i[3] = next_i[3];
}
while(!equals(p_i, to_i));//toのセルに到達したら終了
}
*/ | /*
* =====================================================================================
*
* Filename: cudatestfunc.cu
*
* Description:
*
* Version: 1.0
* Created: 2016年05月17日 00時26分44秒
* Revision: none
* Compiler: gcc
*
* Author: YOUR NAME (),
* Organization:
*
* =====================================================================================
*/
#include <hip/hip_runtime.h>
// Kernel: doubles each element of d_a in place.
// One thread per element (thread t handles d_a[t]); the launch must
// supply at least as many threads as there are elements.
__global__ void test0(float* d_a)
{
int i = threadIdx.x;
d_a[i] *= 2.0f;
}
// Host wrapper (HIP): copies h_a (n floats) to the device, doubles
// every element with a single n-thread block of test0, copies the
// result back into h_a, and frees the device buffer.
// NOTE(review): no error checking on the HIP calls, and n is assumed
// to fit in one block (<= max threads per block) — confirm callers.
void cudatestfunc(float* h_a, int n)
{
float* d_a;
hipMalloc((void**)&d_a, n * sizeof(float)); // device buffer of n floats
hipMemcpy(d_a, h_a, n * sizeof(float), hipMemcpyHostToDevice);
test0<<<1,n>>>(d_a); // 1 block, n threads
hipMemcpy(h_a, d_a, n * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_a);
}
/*
__host__ __device__ int getIndex(float x, float interval)
{
return (int)floorf(x / interval);
}
__host__ __device__ int* getIndex(const float* const x, const float* const interval, int* const xi)
{
xi[0] = getIndex(x[0], interval[0]);
xi[1] = getIndex(x[1], interval[1]);
xi[2] = getIndex(x[2], interval[2]);
xi[3] = getIndex(x[3], interval[3]);
return xi;
}
__host__ __device__ int sign(int x)
{
return (x > 0) ? 1 : ( (x < 0) ? -1 : 0 );
}
__host__ __device__ int* getDirection(const int* const from, const int* const to, int* const direction)
{
direction[0] = sign(to[0] - from[0]);
direction[1] = sign(to[1] - from[1]);
direction[2] = sign(to[2] - from[2]);
direction[3] = sign(to[3] - from[3]);
return direction;
}
__host__ __device__ bool equals(const int* const x0, const int* const x1)
{
bool ret = true;
ret = (x0[0] == x1[0]) ? ret : false;
ret = (x0[1] == x1[1]) ? ret : false;
ret = (x0[2] == x1[2]) ? ret : false;
ret = (x0[3] == x1[3]) ? ret : false;
return ret;
}
//交点を算出する
__host__ __device__ float* getCrossPoint
(
const float* const from,//線分の始点
const float* const to,//線分の終点
const int* const p_i,//セルの番号
const float* const interval,//セルの幅
const int* const dir_i,//線分の方向
int i,//交点を求める方向
float* const cross//算出する交点
)
{
//交点を求める平面の座標
float x = interval[i] * (p_i[i] + ((dir_i[i] + 1) / 2));
//交点の線分のパラメータ
float s = (x - from[i]) / (to[i] - from[i]);
//交点の算出
cross[0] = (to[0] - from[0]) * s + from[0];
cross[1] = (to[1] - from[1]) * s + from[1];
cross[2] = (to[2] - from[2]) * s + from[2];
cross[3] = (to[3] - from[3]) * s + from[3];
//i成分については誤差が出ないようにもともと求めていたxを使用する
cross[i] = x;
//交点を返却
return cross;
}
//交点が指定したセルの指定した方向の境界にいるかどうか判定する
__host__ __device__ bool atBound
(
const float* const cross,//交点
const int* const p_i,//セル番号
const float* const interval,//セルの幅
int i//方向
)
{
bool ret = true;
for(int j = 0; j < 4;j++)
{
if(j != i)
{
if((cross[i] < interval[j] * p_i[j]) || (interval[j] * (p_i[j] + 1) <= cross[i]))
{
ret = false;
}
}
else
{
if((cross[i] < interval[j] * (p_i[j] - 1)) || (interval[j] * (p_i[j] + 2) <= cross[i]))
{
ret = false;
}
}
}
return ret;
}
//線分が通過するセル境界に対し回数をカウントアップする
__host__ __device__ void countCrossing
(
const float* const from,//線分の始点
const float* const to,//線分の終点
int* const c,//カウンタ
int n,//カウンタの個数
const float* const interval//セルの幅
)
{
//fromの属するセル番号を取得
int from_i[4];
getIndex(from, interval, from_i);
//toの属するセル番号を取得
int to_i[4];
getIndex(to, interval, to_i);
//線分の各方向の向き
int direction_i[4];
getDirection(from_i, to_i, direction_i);
//どのセルにも進まない線分かチェック
if((direction_i[0] == 0) && (direction_i[1] == 0) && (direction_i[2] == 0) && (direction_i[3] == 0))
{
//同じセルにとどまっている線分なので集計なしで終了
return;
}
//ループ用のセル番号
int p_i[4] = {from_i[0], from_i[1], from_i[2], from_i[3]};
//fromからtoまでに通過するセルを求める
do
{
//次に通過するセル
int next_i[4] = {p_i[0], p_i[1], p_i[2], p_i[3]};
//p_iの次に通過するセルnext_iを求める
for(int i = 0; i < 4; i++)
{
//next_iが第i方向かどうかチェック
if(direction_i[i] == 0)
{
//この方向には進まないので別の方向をチェック
continue;
}
//toの位置に達していたらこれ以上は進まないので別の方向をチェック
if(p_i[i] == to_i[i])
{
//この方向には進まないので別の方向をチェック
continue;
}
//i方向と垂直な平面との交点を求める
float cross[4];
getCrossPoint(from, to, p_i, interval, direction_i, i, cross);
//現在のセルp_iの境界上の点であればnext_iを確定してbreakでforを抜ける
if(atBound(cross, p_i, interval, i))
{
next_i[i] += direction_i[i];
break;
}
}
//カウントアップを行う
//対象のカウンタのインデックス
int c_i = 0;//TODO カウンタのインデックスを算出する関数を作成する
for(int i = 0; i < 4; i++)
{
int c_dir_i = next_i[i] - p_i[i];
if(c_dir_i != 0)
{
c_i += 2 * i + ((c_dir_i > 0) ? 0 : 1);//向きに応じたカウンタのインデックス
break;
}
}
atomicAdd(&(c[c_i]),1);//カウンタにatomicに加算する <- __host__向けには使えない!!//TODO 修正する!
//p_iをnext_iに更新
p_i[0] = next_i[0];
p_i[1] = next_i[1];
p_i[2] = next_i[2];
p_i[3] = next_i[3];
}
while(!equals(p_i, to_i));//toのセルに到達したら終了
}
*/ |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
* =====================================================================================
*
* Filename: cudatestfunc.cu
*
* Description:
*
* Version: 1.0
* Created: 2016年05月17日 00時26分44秒
* Revision: none
* Compiler: gcc
*
* Author: YOUR NAME (),
* Organization:
*
* =====================================================================================
*/
#include <hip/hip_runtime.h>
// Kernel: doubles each element of d_a in place.
// One thread per element (thread t handles d_a[t]); the launch must
// supply at least as many threads as there are elements.
__global__ void test0(float* d_a)
{
int i = threadIdx.x;
d_a[i] *= 2.0f;
}
// Host wrapper (HIP): copies h_a (n floats) to the device, doubles
// every element with a single n-thread block of test0, copies the
// result back into h_a, and frees the device buffer.
// NOTE(review): no error checking on the HIP calls, and n is assumed
// to fit in one block (<= max threads per block) — confirm callers.
void cudatestfunc(float* h_a, int n)
{
float* d_a;
hipMalloc((void**)&d_a, n * sizeof(float)); // device buffer of n floats
hipMemcpy(d_a, h_a, n * sizeof(float), hipMemcpyHostToDevice);
test0<<<1,n>>>(d_a); // 1 block, n threads
hipMemcpy(h_a, d_a, n * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_a);
}
/*
__host__ __device__ int getIndex(float x, float interval)
{
return (int)floorf(x / interval);
}
__host__ __device__ int* getIndex(const float* const x, const float* const interval, int* const xi)
{
xi[0] = getIndex(x[0], interval[0]);
xi[1] = getIndex(x[1], interval[1]);
xi[2] = getIndex(x[2], interval[2]);
xi[3] = getIndex(x[3], interval[3]);
return xi;
}
__host__ __device__ int sign(int x)
{
return (x > 0) ? 1 : ( (x < 0) ? -1 : 0 );
}
__host__ __device__ int* getDirection(const int* const from, const int* const to, int* const direction)
{
direction[0] = sign(to[0] - from[0]);
direction[1] = sign(to[1] - from[1]);
direction[2] = sign(to[2] - from[2]);
direction[3] = sign(to[3] - from[3]);
return direction;
}
__host__ __device__ bool equals(const int* const x0, const int* const x1)
{
bool ret = true;
ret = (x0[0] == x1[0]) ? ret : false;
ret = (x0[1] == x1[1]) ? ret : false;
ret = (x0[2] == x1[2]) ? ret : false;
ret = (x0[3] == x1[3]) ? ret : false;
return ret;
}
//交点を算出する
__host__ __device__ float* getCrossPoint
(
const float* const from,//線分の始点
const float* const to,//線分の終点
const int* const p_i,//セルの番号
const float* const interval,//セルの幅
const int* const dir_i,//線分の方向
int i,//交点を求める方向
float* const cross//算出する交点
)
{
//交点を求める平面の座標
float x = interval[i] * (p_i[i] + ((dir_i[i] + 1) / 2));
//交点の線分のパラメータ
float s = (x - from[i]) / (to[i] - from[i]);
//交点の算出
cross[0] = (to[0] - from[0]) * s + from[0];
cross[1] = (to[1] - from[1]) * s + from[1];
cross[2] = (to[2] - from[2]) * s + from[2];
cross[3] = (to[3] - from[3]) * s + from[3];
//i成分については誤差が出ないようにもともと求めていたxを使用する
cross[i] = x;
//交点を返却
return cross;
}
//交点が指定したセルの指定した方向の境界にいるかどうか判定する
__host__ __device__ bool atBound
(
const float* const cross,//交点
const int* const p_i,//セル番号
const float* const interval,//セルの幅
int i//方向
)
{
bool ret = true;
for(int j = 0; j < 4;j++)
{
if(j != i)
{
if((cross[i] < interval[j] * p_i[j]) || (interval[j] * (p_i[j] + 1) <= cross[i]))
{
ret = false;
}
}
else
{
if((cross[i] < interval[j] * (p_i[j] - 1)) || (interval[j] * (p_i[j] + 2) <= cross[i]))
{
ret = false;
}
}
}
return ret;
}
//線分が通過するセル境界に対し回数をカウントアップする
__host__ __device__ void countCrossing
(
const float* const from,//線分の始点
const float* const to,//線分の終点
int* const c,//カウンタ
int n,//カウンタの個数
const float* const interval//セルの幅
)
{
//fromの属するセル番号を取得
int from_i[4];
getIndex(from, interval, from_i);
//toの属するセル番号を取得
int to_i[4];
getIndex(to, interval, to_i);
//線分の各方向の向き
int direction_i[4];
getDirection(from_i, to_i, direction_i);
//どのセルにも進まない線分かチェック
if((direction_i[0] == 0) && (direction_i[1] == 0) && (direction_i[2] == 0) && (direction_i[3] == 0))
{
//同じセルにとどまっている線分なので集計なしで終了
return;
}
//ループ用のセル番号
int p_i[4] = {from_i[0], from_i[1], from_i[2], from_i[3]};
//fromからtoまでに通過するセルを求める
do
{
//次に通過するセル
int next_i[4] = {p_i[0], p_i[1], p_i[2], p_i[3]};
//p_iの次に通過するセルnext_iを求める
for(int i = 0; i < 4; i++)
{
//next_iが第i方向かどうかチェック
if(direction_i[i] == 0)
{
//この方向には進まないので別の方向をチェック
continue;
}
//toの位置に達していたらこれ以上は進まないので別の方向をチェック
if(p_i[i] == to_i[i])
{
//この方向には進まないので別の方向をチェック
continue;
}
//i方向と垂直な平面との交点を求める
float cross[4];
getCrossPoint(from, to, p_i, interval, direction_i, i, cross);
//現在のセルp_iの境界上の点であればnext_iを確定してbreakでforを抜ける
if(atBound(cross, p_i, interval, i))
{
next_i[i] += direction_i[i];
break;
}
}
//カウントアップを行う
//対象のカウンタのインデックス
int c_i = 0;//TODO カウンタのインデックスを算出する関数を作成する
for(int i = 0; i < 4; i++)
{
int c_dir_i = next_i[i] - p_i[i];
if(c_dir_i != 0)
{
c_i += 2 * i + ((c_dir_i > 0) ? 0 : 1);//向きに応じたカウンタのインデックス
break;
}
}
atomicAdd(&(c[c_i]),1);//カウンタにatomicに加算する <- __host__向けには使えない!!//TODO 修正する!
//p_iをnext_iに更新
p_i[0] = next_i[0];
p_i[1] = next_i[1];
p_i[2] = next_i[2];
p_i[3] = next_i[3];
}
while(!equals(p_i, to_i));//toのセルに到達したら終了
}
*/ | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z5test0Pf
.globl _Z5test0Pf
.p2align 8
.type _Z5test0Pf,@function
_Z5test0Pf:
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
global_load_b32 v1, v0, s[0:1]
s_waitcnt vmcnt(0)
v_add_f32_e32 v1, v1, v1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z5test0Pf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 8
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 2
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z5test0Pf, .Lfunc_end0-_Z5test0Pf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 8
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z5test0Pf
.private_segment_fixed_size: 0
.sgpr_count: 2
.sgpr_spill_count: 0
.symbol: _Z5test0Pf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
* =====================================================================================
*
* Filename: cudatestfunc.cu
*
* Description:
*
* Version: 1.0
* Created: 2016年05月17日 00時26分44秒
* Revision: none
* Compiler: gcc
*
* Author: YOUR NAME (),
* Organization:
*
* =====================================================================================
*/
#include <hip/hip_runtime.h>
// Kernel: doubles each element of d_a in place.
// One thread per element (thread t handles d_a[t]); the launch must
// supply at least as many threads as there are elements.
__global__ void test0(float* d_a)
{
int i = threadIdx.x;
d_a[i] *= 2.0f;
}
// Host wrapper (HIP): copies h_a (n floats) to the device, doubles
// every element with a single n-thread block of test0, copies the
// result back into h_a, and frees the device buffer.
// NOTE(review): no error checking on the HIP calls, and n is assumed
// to fit in one block (<= max threads per block) — confirm callers.
void cudatestfunc(float* h_a, int n)
{
float* d_a;
hipMalloc((void**)&d_a, n * sizeof(float)); // device buffer of n floats
hipMemcpy(d_a, h_a, n * sizeof(float), hipMemcpyHostToDevice);
test0<<<1,n>>>(d_a); // 1 block, n threads
hipMemcpy(h_a, d_a, n * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_a);
}
/*
__host__ __device__ int getIndex(float x, float interval)
{
return (int)floorf(x / interval);
}
__host__ __device__ int* getIndex(const float* const x, const float* const interval, int* const xi)
{
xi[0] = getIndex(x[0], interval[0]);
xi[1] = getIndex(x[1], interval[1]);
xi[2] = getIndex(x[2], interval[2]);
xi[3] = getIndex(x[3], interval[3]);
return xi;
}
__host__ __device__ int sign(int x)
{
return (x > 0) ? 1 : ( (x < 0) ? -1 : 0 );
}
__host__ __device__ int* getDirection(const int* const from, const int* const to, int* const direction)
{
direction[0] = sign(to[0] - from[0]);
direction[1] = sign(to[1] - from[1]);
direction[2] = sign(to[2] - from[2]);
direction[3] = sign(to[3] - from[3]);
return direction;
}
__host__ __device__ bool equals(const int* const x0, const int* const x1)
{
bool ret = true;
ret = (x0[0] == x1[0]) ? ret : false;
ret = (x0[1] == x1[1]) ? ret : false;
ret = (x0[2] == x1[2]) ? ret : false;
ret = (x0[3] == x1[3]) ? ret : false;
return ret;
}
//交点を算出する
__host__ __device__ float* getCrossPoint
(
const float* const from,//線分の始点
const float* const to,//線分の終点
const int* const p_i,//セルの番号
const float* const interval,//セルの幅
const int* const dir_i,//線分の方向
int i,//交点を求める方向
float* const cross//算出する交点
)
{
//交点を求める平面の座標
float x = interval[i] * (p_i[i] + ((dir_i[i] + 1) / 2));
//交点の線分のパラメータ
float s = (x - from[i]) / (to[i] - from[i]);
//交点の算出
cross[0] = (to[0] - from[0]) * s + from[0];
cross[1] = (to[1] - from[1]) * s + from[1];
cross[2] = (to[2] - from[2]) * s + from[2];
cross[3] = (to[3] - from[3]) * s + from[3];
//i成分については誤差が出ないようにもともと求めていたxを使用する
cross[i] = x;
//交点を返却
return cross;
}
//交点が指定したセルの指定した方向の境界にいるかどうか判定する
__host__ __device__ bool atBound
(
const float* const cross,//交点
const int* const p_i,//セル番号
const float* const interval,//セルの幅
int i//方向
)
{
bool ret = true;
for(int j = 0; j < 4;j++)
{
if(j != i)
{
if((cross[i] < interval[j] * p_i[j]) || (interval[j] * (p_i[j] + 1) <= cross[i]))
{
ret = false;
}
}
else
{
if((cross[i] < interval[j] * (p_i[j] - 1)) || (interval[j] * (p_i[j] + 2) <= cross[i]))
{
ret = false;
}
}
}
return ret;
}
//線分が通過するセル境界に対し回数をカウントアップする
__host__ __device__ void countCrossing
(
const float* const from,//線分の始点
const float* const to,//線分の終点
int* const c,//カウンタ
int n,//カウンタの個数
const float* const interval//セルの幅
)
{
//fromの属するセル番号を取得
int from_i[4];
getIndex(from, interval, from_i);
//toの属するセル番号を取得
int to_i[4];
getIndex(to, interval, to_i);
//線分の各方向の向き
int direction_i[4];
getDirection(from_i, to_i, direction_i);
//どのセルにも進まない線分かチェック
if((direction_i[0] == 0) && (direction_i[1] == 0) && (direction_i[2] == 0) && (direction_i[3] == 0))
{
//同じセルにとどまっている線分なので集計なしで終了
return;
}
//ループ用のセル番号
int p_i[4] = {from_i[0], from_i[1], from_i[2], from_i[3]};
//fromからtoまでに通過するセルを求める
do
{
//次に通過するセル
int next_i[4] = {p_i[0], p_i[1], p_i[2], p_i[3]};
//p_iの次に通過するセルnext_iを求める
for(int i = 0; i < 4; i++)
{
//next_iが第i方向かどうかチェック
if(direction_i[i] == 0)
{
//この方向には進まないので別の方向をチェック
continue;
}
//toの位置に達していたらこれ以上は進まないので別の方向をチェック
if(p_i[i] == to_i[i])
{
//この方向には進まないので別の方向をチェック
continue;
}
//i方向と垂直な平面との交点を求める
float cross[4];
getCrossPoint(from, to, p_i, interval, direction_i, i, cross);
//現在のセルp_iの境界上の点であればnext_iを確定してbreakでforを抜ける
if(atBound(cross, p_i, interval, i))
{
next_i[i] += direction_i[i];
break;
}
}
//カウントアップを行う
//対象のカウンタのインデックス
int c_i = 0;//TODO カウンタのインデックスを算出する関数を作成する
for(int i = 0; i < 4; i++)
{
int c_dir_i = next_i[i] - p_i[i];
if(c_dir_i != 0)
{
c_i += 2 * i + ((c_dir_i > 0) ? 0 : 1);//向きに応じたカウンタのインデックス
break;
}
}
atomicAdd(&(c[c_i]),1);//カウンタにatomicに加算する <- __host__向けには使えない!!//TODO 修正する!
//p_iをnext_iに更新
p_i[0] = next_i[0];
p_i[1] = next_i[1];
p_i[2] = next_i[2];
p_i[3] = next_i[3];
}
while(!equals(p_i, to_i));//toのセルに到達したら終了
}
*/ | .text
.file "cudatestfunc.hip"
	.globl	_Z20__device_stub__test0Pf      # -- Begin function _Z20__device_stub__test0Pf
	.p2align	4, 0x90
	.type	_Z20__device_stub__test0Pf,@function
_Z20__device_stub__test0Pf:             # @_Z20__device_stub__test0Pf
	.cfi_startproc
# Host-side launch stub for kernel test0(float*).
# Spills the single pointer argument, pops the launch configuration
# previously pushed by __hipPushCallConfiguration, and forwards it plus
# the argument array to hipLaunchKernel, using _Z5test0Pf as the handle.
# %bb.0:
	subq	$72, %rsp
	.cfi_def_cfa_offset 80
	movq	%rdi, 64(%rsp)                  # spill d_a
	leaq	64(%rsp), %rax
	movq	%rax, (%rsp)                    # kernel arg array = { &d_a }
	leaq	48(%rsp), %rdi                  # out: grid dim
	leaq	32(%rsp), %rsi                  # out: block dim
	leaq	24(%rsp), %rdx                  # out: shared-mem bytes
	leaq	16(%rsp), %rcx                  # out: stream
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	movq	%rsp, %r9                       # args array pointer
	movl	$_Z5test0Pf, %edi               # kernel handle (host symbol)
	pushq	16(%rsp)                        # stream
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)                        # shared-mem bytes
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$88, %rsp
	.cfi_adjust_cfa_offset -88
	retq
.Lfunc_end0:
	.size	_Z20__device_stub__test0Pf, .Lfunc_end0-_Z20__device_stub__test0Pf
	.cfi_endproc
                                        # -- End function
	.globl	_Z12cudatestfuncPfi             # -- Begin function _Z12cudatestfuncPfi
	.p2align	4, 0x90
	.type	_Z12cudatestfuncPfi,@function
_Z12cudatestfuncPfi:                    # @_Z12cudatestfuncPfi
	.cfi_startproc
# void cudatestfunc(float* h_a, int n):
# hipMalloc n*4 bytes, copy in (hipMemcpyHostToDevice == 1), push a
# <<<1, n>>> launch configuration, launch test0 (the stub is inlined
# here), copy out (hipMemcpyDeviceToHost == 2), and hipFree.
# %bb.0:
	pushq	%r15
	.cfi_def_cfa_offset 16
	pushq	%r14
	.cfi_def_cfa_offset 24
	pushq	%rbx
	.cfi_def_cfa_offset 32
	subq	$80, %rsp
	.cfi_def_cfa_offset 112
	.cfi_offset %rbx, -32
	.cfi_offset %r14, -24
	.cfi_offset %r15, -16
	movq	%rdi, %r14                      # r14 = h_a
	movslq	%esi, %r15                      # r15 = (int64)n
	leaq	(,%r15,4), %rbx                 # rbx = n * sizeof(float)
	leaq	8(%rsp), %rdi                   # &d_a
	movq	%rbx, %rsi
	callq	hipMalloc
	movq	8(%rsp), %rdi                   # d_a
	movq	%r14, %rsi
	movq	%rbx, %rdx
	movl	$1, %ecx                        # hipMemcpyHostToDevice
	callq	hipMemcpy
	movl	%r15d, %edx                     # blockDim = dim3(n,1,...) packed x|y<<32
	movabsq	$4294967296, %rdi               # imm = 0x100000000
	orq	%rdi, %rdx                      # blockDim.xy = (n, 1)
	orq	$1, %rdi                        # gridDim.xy = (1, 1)
	movl	$1, %esi                        # gridDim.z = 1
	movl	$1, %ecx                        # blockDim.z = 1
	xorl	%r8d, %r8d                      # sharedMem = 0
	xorl	%r9d, %r9d                      # stream = 0
	callq	__hipPushCallConfiguration
	testl	%eax, %eax
	jne	.LBB1_2                         # non-zero => skip the launch
# %bb.1:
	movq	8(%rsp), %rax
	movq	%rax, 72(%rsp)                  # spill d_a for the arg array
	leaq	72(%rsp), %rax
	movq	%rax, 16(%rsp)                  # arg array = { &d_a }
	leaq	56(%rsp), %rdi
	leaq	40(%rsp), %rsi
	leaq	32(%rsp), %rdx
	leaq	24(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	56(%rsp), %rsi
	movl	64(%rsp), %edx
	movq	40(%rsp), %rcx
	movl	48(%rsp), %r8d
	leaq	16(%rsp), %r9
	movl	$_Z5test0Pf, %edi               # kernel handle
	pushq	24(%rsp)                        # stream
	.cfi_adjust_cfa_offset 8
	pushq	40(%rsp)                        # shared-mem bytes
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$16, %rsp
	.cfi_adjust_cfa_offset -16
.LBB1_2:
	movq	8(%rsp), %rsi                   # src = d_a
	movq	%r14, %rdi                      # dst = h_a
	movq	%rbx, %rdx
	movl	$2, %ecx                        # hipMemcpyDeviceToHost
	callq	hipMemcpy
	movq	8(%rsp), %rdi
	callq	hipFree
	addq	$80, %rsp
	.cfi_def_cfa_offset 32
	popq	%rbx
	.cfi_def_cfa_offset 24
	popq	%r14
	.cfi_def_cfa_offset 16
	popq	%r15
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end1:
	.size	_Z12cudatestfuncPfi, .Lfunc_end1-_Z12cudatestfuncPfi
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# Module constructor (run from .init_array): registers the fat binary
# exactly once (guarded by __hip_gpubin_handle), registers kernel test0
# with the HIP runtime, then schedules __hip_module_dtor via atexit.
# %bb.0:
	subq	$40, %rsp
	.cfi_def_cfa_offset 48
	cmpq	$0, __hip_gpubin_handle(%rip)
	jne	.LBB2_2                         # already registered
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB2_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)                 # zero the optional stack args
	movups	%xmm0, (%rsp)
	movl	$_Z5test0Pf, %esi               # host-side kernel handle
	movl	$.L__unnamed_1, %edx            # device name "_Z5test0Pf"
	movl	$.L__unnamed_1, %ecx
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end2:
	.size	__hip_module_ctor, .Lfunc_end2-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# atexit handler: unregisters the fat binary if it was registered and
# clears the cached handle so a second invocation is a no-op.
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB3_2                         # never registered: nothing to do
# %bb.1:
	pushq	%rax
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB3_2:
	retq
.Lfunc_end3:
	.size	__hip_module_dtor, .Lfunc_end3-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
# Host-side kernel handle: its address identifies the kernel to the HIP
# runtime; it stores the address of the launch stub.
	.type	_Z5test0Pf,@object              # @_Z5test0Pf
	.section	.rodata,"a",@progbits
	.globl	_Z5test0Pf
	.p2align	3, 0x0
_Z5test0Pf:
	.quad	_Z20__device_stub__test0Pf
	.size	_Z5test0Pf, 8
# Mangled kernel name passed to __hipRegisterFunction.
	.type	.L__unnamed_1,@object           # @0
	.section	.rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
	.asciz	"_Z5test0Pf"
	.size	.L__unnamed_1, 11
# Fat-binary wrapper: magic 0x48495046, version 1, pointer to the
# embedded device code blob (__hip_fatbin, defined elsewhere).
	.type	__hip_fatbin_wrapper,@object    # @__hip_fatbin_wrapper
	.section	.hipFatBinSegment,"a",@progbits
	.p2align	3, 0x0
__hip_fatbin_wrapper:
	.long	1212764230                      # 0x48495046
	.long	1                               # 0x1
	.quad	__hip_fatbin
	.quad	0
	.size	__hip_fatbin_wrapper, 24
# Cached fat-binary handle (zero until __hip_module_ctor runs).
	.type	__hip_gpubin_handle,@object     # @__hip_gpubin_handle
	.local	__hip_gpubin_handle
	.comm	__hip_gpubin_handle,8,8
	.section	.init_array,"aw",@init_array
	.p2align	3, 0x0
	.quad	__hip_module_ctor               # run registration at startup
	.type	__hip_cuid_,@object             # @__hip_cuid_
	.bss
	.globl	__hip_cuid_
__hip_cuid_:
	.byte	0                               # 0x0
	.size	__hip_cuid_, 1
	.section	".linker-options","e",@llvm_linker_options
	.ident	"AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
	.section	".note.GNU-stack","",@progbits
	.addrsig
	.addrsig_sym _Z20__device_stub__test0Pf
	.addrsig_sym __hip_module_ctor
	.addrsig_sym __hip_module_dtor
	.addrsig_sym _Z5test0Pf
	.addrsig_sym __hip_fatbin
	.addrsig_sym __hip_fatbin_wrapper
	.addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
		Function : _Z5test0Pf
	.headerflags	@"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
        /* test0(float* d_a), sm_80 SASS: each thread forms the 64-bit
           address d_a + 4*tid.x (the stride 4 is materialized into R3 by
           HFMA2.MMA, then IMAD.WIDE adds the kernel-param base from
           constant bank c[0x0][0x160]), loads the float, doubles it with
           FADD R5 = R0 + R0, and stores it back.  The trailing NOPs are
           section padding after EXIT. */
        /*0000*/                   MOV R1, c[0x0][0x28] ;                  /* 0x00000a0000017a02 */
                                                                           /* 0x000fe40000000f00 */
        /*0010*/                   S2R R2, SR_TID.X ;                      /* 0x0000000000027919 */
                                                                           /* 0x000e220000002100 */
        /*0020*/                   HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
                                                                           /* 0x000fe200000001ff */
        /*0030*/                   ULDC.64 UR4, c[0x0][0x118] ;            /* 0x0000460000047ab9 */
                                                                           /* 0x000fd20000000a00 */
        /*0040*/                   IMAD.WIDE R2, R2, R3, c[0x0][0x160] ;   /* 0x0000580002027625 */
                                                                           /* 0x001fca00078e0203 */
        /*0050*/                   LDG.E R0, [R2.64] ;                     /* 0x0000000402007981 */
                                                                           /* 0x000ea4000c1e1900 */
        /*0060*/                   FADD R5, R0, R0 ;                       /* 0x0000000000057221 */
                                                                           /* 0x004fca0000000000 */
        /*0070*/                   STG.E [R2.64], R5 ;                     /* 0x0000000502007986 */
                                                                           /* 0x000fe2000c101904 */
        /*0080*/                   EXIT ;                                  /* 0x000000000000794d */
                                                                           /* 0x000fea0003800000 */
        /*0090*/                   BRA 0x90;                               /* 0xfffffff000007947 */
                                                                           /* 0x000fc0000383ffff */
        /*00a0*/                   NOP;                                    /* 0x0000000000007918 */
                                                                           /* 0x000fc00000000000 */
        /*00b0*/                   NOP;                                    /* 0x0000000000007918 */
                                                                           /* 0x000fc00000000000 */
        /*00c0*/                   NOP;                                    /* 0x0000000000007918 */
                                                                           /* 0x000fc00000000000 */
        /*00d0*/                   NOP;                                    /* 0x0000000000007918 */
                                                                           /* 0x000fc00000000000 */
        /*00e0*/                   NOP;                                    /* 0x0000000000007918 */
                                                                           /* 0x000fc00000000000 */
        /*00f0*/                   NOP;                                    /* 0x0000000000007918 */
                                                                           /* 0x000fc00000000000 */
        /*0100*/                   NOP;                                    /* 0x0000000000007918 */
                                                                           /* 0x000fc00000000000 */
        /*0110*/                   NOP;                                    /* 0x0000000000007918 */
                                                                           /* 0x000fc00000000000 */
        /*0120*/                   NOP;                                    /* 0x0000000000007918 */
                                                                           /* 0x000fc00000000000 */
        /*0130*/                   NOP;                                    /* 0x0000000000007918 */
                                                                           /* 0x000fc00000000000 */
        /*0140*/                   NOP;                                    /* 0x0000000000007918 */
                                                                           /* 0x000fc00000000000 */
        /*0150*/                   NOP;                                    /* 0x0000000000007918 */
                                                                           /* 0x000fc00000000000 */
        /*0160*/                   NOP;                                    /* 0x0000000000007918 */
                                                                           /* 0x000fc00000000000 */
        /*0170*/                   NOP;                                    /* 0x0000000000007918 */
                                                                           /* 0x000fc00000000000 */
.......... | .text
	.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
	.protected	_Z5test0Pf
	.globl	_Z5test0Pf
	.p2align	8
	.type	_Z5test0Pf,@function
_Z5test0Pf:
; test0(float* d_a) for gfx1100 (wave32): load the kernarg pointer,
; address element 4*tid.x (v0 holds the workitem x-id at entry), double
; the value (v_add_f32 v1, v1, v1 == 2*x), and store it back.
	s_load_b64 s[0:1], s[0:1], 0x0          ; s[0:1] = d_a from kernarg segment
	v_lshlrev_b32_e32 v0, 2, v0             ; byte offset = tid.x * 4
	s_waitcnt lgkmcnt(0)
	global_load_b32 v1, v0, s[0:1]
	s_waitcnt vmcnt(0)
	v_add_f32_e32 v1, v1, v1                ; v1 = 2 * d_a[tid]
	global_store_b32 v0, v1, s[0:1]
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
	.section	.rodata,"a",@progbits
	.p2align	6, 0x0
; HSA kernel descriptor: 8-byte kernarg (the single pointer), wave32,
; 2 SGPRs / 2 VGPRs, only the workgroup-id X system SGPR enabled.
	.amdhsa_kernel _Z5test0Pf
		.amdhsa_group_segment_fixed_size 0
		.amdhsa_private_segment_fixed_size 0
		.amdhsa_kernarg_size 8
		.amdhsa_user_sgpr_count 15
		.amdhsa_user_sgpr_dispatch_ptr 0
		.amdhsa_user_sgpr_queue_ptr 0
		.amdhsa_user_sgpr_kernarg_segment_ptr 1
		.amdhsa_user_sgpr_dispatch_id 0
		.amdhsa_user_sgpr_private_segment_size 0
		.amdhsa_wavefront_size32 1
		.amdhsa_uses_dynamic_stack 0
		.amdhsa_enable_private_segment 0
		.amdhsa_system_sgpr_workgroup_id_x 1
		.amdhsa_system_sgpr_workgroup_id_y 0
		.amdhsa_system_sgpr_workgroup_id_z 0
		.amdhsa_system_sgpr_workgroup_info 0
		.amdhsa_system_vgpr_workitem_id 0
		.amdhsa_next_free_vgpr 2
		.amdhsa_next_free_sgpr 2
		.amdhsa_reserve_vcc 0
		.amdhsa_float_round_mode_32 0
		.amdhsa_float_round_mode_16_64 0
		.amdhsa_float_denorm_mode_32 3
		.amdhsa_float_denorm_mode_16_64 3
		.amdhsa_dx10_clamp 1
		.amdhsa_ieee_mode 1
		.amdhsa_fp16_overflow 0
		.amdhsa_workgroup_processor_mode 1
		.amdhsa_memory_ordered 1
		.amdhsa_forward_progress 0
		.amdhsa_shared_vgpr_count 0
		.amdhsa_exception_fp_ieee_invalid_op 0
		.amdhsa_exception_fp_denorm_src 0
		.amdhsa_exception_fp_ieee_div_zero 0
		.amdhsa_exception_fp_ieee_overflow 0
		.amdhsa_exception_fp_ieee_underflow 0
		.amdhsa_exception_fp_ieee_inexact 0
		.amdhsa_exception_int_div_zero 0
	.end_amdhsa_kernel
	.text
.Lfunc_end0:
	.size	_Z5test0Pf, .Lfunc_end0-_Z5test0Pf
	.section	.AMDGPU.csdata,"",@progbits
	.text
; Pad the end of the code section with the s_code_end filler pattern.
	.p2alignl 7, 3214868480
	.fill 96, 4, 3214868480
	.type	__hip_cuid_,@object
	.section	.bss,"aw",@nobits
	.globl	__hip_cuid_
__hip_cuid_:
	.byte	0
	.size	__hip_cuid_, 1
	.ident	"AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
	.section	".note.GNU-stack","",@progbits
	.addrsig
	.addrsig_sym __hip_cuid_
; Code-object metadata (YAML) describing the kernel's single
; global-buffer argument and its register/wavefront resource usage.
	.amdgpu_metadata
---
amdhsa.kernels:
  - .args:
      - .address_space:  global
        .offset:         0
        .size:           8
        .value_kind:     global_buffer
    .group_segment_fixed_size: 0
    .kernarg_segment_align: 8
    .kernarg_segment_size: 8
    .language:       OpenCL C
    .language_version:
      - 2
      - 0
    .max_flat_workgroup_size: 1024
    .name:           _Z5test0Pf
    .private_segment_fixed_size: 0
    .sgpr_count:     2
    .sgpr_spill_count: 0
    .symbol:         _Z5test0Pf.kd
    .uniform_work_group_size: 1
    .uses_dynamic_stack: false
    .vgpr_count:     2
    .vgpr_spill_count: 0
    .wavefront_size: 32
    .workgroup_processor_mode: 1
amdhsa.target:   amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
  - 1
  - 2
...
	.end_amdgpu_metadata
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0007de44_00000000-6_cudatestfunc.cudafe1.cpp"
.text
#APP
#NO_APP
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
	.cfi_startproc
# atexit handler: unregisters this module's fat binary handle.
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2030:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
	.globl	_Z24__device_stub__Z5test0PfPf
	.type	_Z24__device_stub__Z5test0PfPf, @function
_Z24__device_stub__Z5test0PfPf:
.LFB2052:
	.cfi_startproc
# CUDA launch stub for test0(float*): spills the argument, pops the
# launch configuration, and calls cudaLaunchKernel.  A stack-protector
# canary guards the local argument block.
	endbr64
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 8(%rsp)                   # spill d_a
	movq	%fs:40, %rax                    # load canary
	movq	%rax, 88(%rsp)
	xorl	%eax, %eax
	leaq	8(%rsp), %rax
	movq	%rax, 80(%rsp)                  # arg array = { &d_a }
	movl	$1, 32(%rsp)                    # default grid/block dims (1,1,1)
	movl	$1, 36(%rsp)
	movl	$1, 40(%rsp)
	movl	$1, 44(%rsp)
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	leaq	24(%rsp), %rcx                  # out: stream
	leaq	16(%rsp), %rdx                  # out: shared-mem bytes
	leaq	44(%rsp), %rsi                  # out: block dim
	leaq	32(%rsp), %rdi                  # out: grid dim
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L7                             # 0 => configuration valid, launch
.L3:
	movq	88(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L8                             # canary mismatch => abort
	addq	$104, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L7:
	.cfi_restore_state
	pushq	24(%rsp)                        # stream
	.cfi_def_cfa_offset 120
	pushq	24(%rsp)                        # shared-mem bytes
	.cfi_def_cfa_offset 128
	leaq	96(%rsp), %r9                   # args array
	movq	60(%rsp), %rcx
	movl	68(%rsp), %r8d
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	leaq	_Z5test0Pf(%rip), %rdi          # kernel handle = host wrapper
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 112
	jmp	.L3
.L8:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2052:
	.size	_Z24__device_stub__Z5test0PfPf, .-_Z24__device_stub__Z5test0PfPf
	.globl	_Z5test0Pf
	.type	_Z5test0Pf, @function
_Z5test0Pf:
.LFB2053:
	.cfi_startproc
# Host wrapper carrying the kernel's mangled name; delegates to the
# launch stub so test0<<<...>>>(...) call sites resolve here.
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	call	_Z24__device_stub__Z5test0PfPf
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2053:
	.size	_Z5test0Pf, .-_Z5test0Pf
	.globl	_Z12cudatestfuncPfi
	.type	_Z12cudatestfuncPfi, @function
_Z12cudatestfuncPfi:
.LFB2027:
	.cfi_startproc
# void cudatestfunc(float* h_a, int n):
# cudaMalloc n*4 bytes, copy in (cudaMemcpyHostToDevice == 1), push a
# <<<1, n>>> configuration, launch via the stub, copy out
# (cudaMemcpyDeviceToHost == 2), cudaFree.  Canary-protected.
	endbr64
	pushq	%r12
	.cfi_def_cfa_offset 16
	.cfi_offset 12, -16
	pushq	%rbp
	.cfi_def_cfa_offset 24
	.cfi_offset 6, -24
	pushq	%rbx
	.cfi_def_cfa_offset 32
	.cfi_offset 3, -32
	subq	$48, %rsp
	.cfi_def_cfa_offset 80
	movq	%rdi, %rbp                      # rbp = h_a
	movl	%esi, %r12d                     # r12d = n
	movq	%fs:40, %rax                    # stack canary
	movq	%rax, 40(%rsp)
	xorl	%eax, %eax
	movslq	%esi, %rbx
	salq	$2, %rbx                        # rbx = n * sizeof(float)
	leaq	8(%rsp), %rdi                   # &d_a
	movq	%rbx, %rsi
	call	cudaMalloc@PLT
	movl	$1, %ecx                        # cudaMemcpyHostToDevice
	movq	%rbx, %rdx
	movq	%rbp, %rsi
	movq	8(%rsp), %rdi
	call	cudaMemcpy@PLT
	movl	%r12d, 28(%rsp)                 # blockDim = dim3(n,1,1)
	movl	$1, 32(%rsp)
	movl	$1, 16(%rsp)                    # gridDim = dim3(1,1,1)
	movl	$1, 20(%rsp)
	movl	$0, %r9d                        # stream = 0
	movl	$0, %r8d                        # sharedMem = 0
	movq	28(%rsp), %rdx
	movl	$1, %ecx
	movq	16(%rsp), %rdi
	movl	$1, %esi
	call	__cudaPushCallConfiguration@PLT
	testl	%eax, %eax
	je	.L15                            # 0 => perform the launch
.L12:
	movl	$2, %ecx                        # cudaMemcpyDeviceToHost
	movq	%rbx, %rdx
	movq	8(%rsp), %rsi
	movq	%rbp, %rdi
	call	cudaMemcpy@PLT
	movq	8(%rsp), %rdi
	call	cudaFree@PLT
	movq	40(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L16                            # canary mismatch => abort
	addq	$48, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 32
	popq	%rbx
	.cfi_def_cfa_offset 24
	popq	%rbp
	.cfi_def_cfa_offset 16
	popq	%r12
	.cfi_def_cfa_offset 8
	ret
.L15:
	.cfi_restore_state
	movq	8(%rsp), %rdi                   # d_a
	call	_Z24__device_stub__Z5test0PfPf
	jmp	.L12
.L16:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2027:
	.size	_Z12cudatestfuncPfi, .-_Z12cudatestfuncPfi
	.section	.rodata.str1.1,"aMS",@progbits,1
.LC0:
	.string	"_Z5test0Pf"                    # mangled kernel name for registration
	.text
	.type	_ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
	.cfi_startproc
# Static initializer (run from .init_array): registers the embedded fat
# binary, binds host wrapper _Z5test0Pf to the device kernel name,
# finalizes registration, and schedules the unregister handler.
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	leaq	_ZL15__fatDeviceText(%rip), %rdi
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rdi
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)
	pushq	$0                              # four optional args = NULL
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d                       # thread limit: unlimited
	leaq	.LC0(%rip), %rdx                # device function name
	movq	%rdx, %rcx
	leaq	_Z5test0Pf(%rip), %rsi          # host function handle
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2055:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
	.section	.init_array,"aw"
	.align 8
	.quad	_ZL24__sti____cudaRegisterAllv  # run CUDA registration at startup
# Fat-binary descriptor: magic 0x466243B1, version 1, pointer to the
# embedded device code (fatbinData, defined in another object).
	.section	.nvFatBinSegment,"aw"
	.align 8
	.type	_ZL15__fatDeviceText, @object
	.size	_ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
	.long	1180844977
	.long	1
	.quad	fatbinData
	.quad	0
	.local	_ZL20__cudaFatCubinHandle
	.comm	_ZL20__cudaFatCubinHandle,8,8
	.ident	"GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
	.section	.note.GNU-stack,"",@progbits
# GNU property note advertising IBT + SHSTK (CET) support.
	.section	.note.gnu.property,"a"
	.align 8
	.long	1f - 0f
	.long	4f - 1f
	.long	5
0:
	.string	"GNU"
1:
	.align 8
	.long	0xc0000002                      # GNU_PROPERTY_X86_FEATURE_1_AND
	.long	3f - 2f
2:
	.long	0x3                             # IBT | SHSTK
3:
	.align 8
4:
4: | .text
.file "cudatestfunc.hip"
	.globl	_Z20__device_stub__test0Pf      # -- Begin function _Z20__device_stub__test0Pf
	.p2align	4, 0x90
	.type	_Z20__device_stub__test0Pf,@function
_Z20__device_stub__test0Pf:             # @_Z20__device_stub__test0Pf
	.cfi_startproc
# Host-side launch stub for kernel test0(float*): spill the pointer
# argument, pop the pushed launch configuration, forward everything to
# hipLaunchKernel with _Z5test0Pf as the kernel handle.
# %bb.0:
	subq	$72, %rsp
	.cfi_def_cfa_offset 80
	movq	%rdi, 64(%rsp)                  # spill d_a
	leaq	64(%rsp), %rax
	movq	%rax, (%rsp)                    # kernel arg array = { &d_a }
	leaq	48(%rsp), %rdi                  # out: grid dim
	leaq	32(%rsp), %rsi                  # out: block dim
	leaq	24(%rsp), %rdx                  # out: shared-mem bytes
	leaq	16(%rsp), %rcx                  # out: stream
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	movq	%rsp, %r9
	movl	$_Z5test0Pf, %edi               # kernel handle
	pushq	16(%rsp)                        # stream
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)                        # shared-mem bytes
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$88, %rsp
	.cfi_adjust_cfa_offset -88
	retq
.Lfunc_end0:
	.size	_Z20__device_stub__test0Pf, .Lfunc_end0-_Z20__device_stub__test0Pf
	.cfi_endproc
                                        # -- End function
	.globl	_Z12cudatestfuncPfi             # -- Begin function _Z12cudatestfuncPfi
	.p2align	4, 0x90
	.type	_Z12cudatestfuncPfi,@function
_Z12cudatestfuncPfi:                    # @_Z12cudatestfuncPfi
	.cfi_startproc
# void cudatestfunc(float* h_a, int n):
# hipMalloc n*4 bytes, copy in (hipMemcpyHostToDevice == 1), push a
# <<<1, n>>> configuration, launch test0 inline, copy out
# (hipMemcpyDeviceToHost == 2), hipFree.
# %bb.0:
	pushq	%r15
	.cfi_def_cfa_offset 16
	pushq	%r14
	.cfi_def_cfa_offset 24
	pushq	%rbx
	.cfi_def_cfa_offset 32
	subq	$80, %rsp
	.cfi_def_cfa_offset 112
	.cfi_offset %rbx, -32
	.cfi_offset %r14, -24
	.cfi_offset %r15, -16
	movq	%rdi, %r14                      # r14 = h_a
	movslq	%esi, %r15                      # r15 = (int64)n
	leaq	(,%r15,4), %rbx                 # rbx = n * sizeof(float)
	leaq	8(%rsp), %rdi                   # &d_a
	movq	%rbx, %rsi
	callq	hipMalloc
	movq	8(%rsp), %rdi                   # d_a
	movq	%r14, %rsi
	movq	%rbx, %rdx
	movl	$1, %ecx                        # hipMemcpyHostToDevice
	callq	hipMemcpy
	movl	%r15d, %edx                     # blockDim.xy = (n, 1) packed
	movabsq	$4294967296, %rdi               # imm = 0x100000000
	orq	%rdi, %rdx
	orq	$1, %rdi                        # gridDim.xy = (1, 1)
	movl	$1, %esi                        # gridDim.z = 1
	movl	$1, %ecx                        # blockDim.z = 1
	xorl	%r8d, %r8d                      # sharedMem = 0
	xorl	%r9d, %r9d                      # stream = 0
	callq	__hipPushCallConfiguration
	testl	%eax, %eax
	jne	.LBB1_2                         # non-zero => skip launch
# %bb.1:
	movq	8(%rsp), %rax
	movq	%rax, 72(%rsp)                  # spill d_a for the arg array
	leaq	72(%rsp), %rax
	movq	%rax, 16(%rsp)                  # arg array = { &d_a }
	leaq	56(%rsp), %rdi
	leaq	40(%rsp), %rsi
	leaq	32(%rsp), %rdx
	leaq	24(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	56(%rsp), %rsi
	movl	64(%rsp), %edx
	movq	40(%rsp), %rcx
	movl	48(%rsp), %r8d
	leaq	16(%rsp), %r9
	movl	$_Z5test0Pf, %edi               # kernel handle
	pushq	24(%rsp)                        # stream
	.cfi_adjust_cfa_offset 8
	pushq	40(%rsp)                        # shared-mem bytes
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$16, %rsp
	.cfi_adjust_cfa_offset -16
.LBB1_2:
	movq	8(%rsp), %rsi                   # src = d_a
	movq	%r14, %rdi                      # dst = h_a
	movq	%rbx, %rdx
	movl	$2, %ecx                        # hipMemcpyDeviceToHost
	callq	hipMemcpy
	movq	8(%rsp), %rdi
	callq	hipFree
	addq	$80, %rsp
	.cfi_def_cfa_offset 32
	popq	%rbx
	.cfi_def_cfa_offset 24
	popq	%r14
	.cfi_def_cfa_offset 16
	popq	%r15
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end1:
	.size	_Z12cudatestfuncPfi, .Lfunc_end1-_Z12cudatestfuncPfi
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# Module constructor: registers the fat binary once (guarded by
# __hip_gpubin_handle), registers kernel test0, schedules the dtor.
# %bb.0:
	subq	$40, %rsp
	.cfi_def_cfa_offset 48
	cmpq	$0, __hip_gpubin_handle(%rip)
	jne	.LBB2_2                         # already registered
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)
.LBB2_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)                 # zero the optional stack args
	movups	%xmm0, (%rsp)
	movl	$_Z5test0Pf, %esi               # host-side kernel handle
	movl	$.L__unnamed_1, %edx            # device name "_Z5test0Pf"
	movl	$.L__unnamed_1, %ecx
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end2:
	.size	__hip_module_ctor, .Lfunc_end2-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# atexit handler: unregister the fat binary if registered, clear handle.
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB3_2                         # never registered: nothing to do
# %bb.1:
	pushq	%rax
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB3_2:
	retq
.Lfunc_end3:
	.size	__hip_module_dtor, .Lfunc_end3-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
# Host-side kernel handle: its address identifies the kernel; it stores
# the launch stub's address.
	.type	_Z5test0Pf,@object              # @_Z5test0Pf
	.section	.rodata,"a",@progbits
	.globl	_Z5test0Pf
	.p2align	3, 0x0
_Z5test0Pf:
	.quad	_Z20__device_stub__test0Pf
	.size	_Z5test0Pf, 8
# Mangled kernel name passed to __hipRegisterFunction.
	.type	.L__unnamed_1,@object           # @0
	.section	.rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
	.asciz	"_Z5test0Pf"
	.size	.L__unnamed_1, 11
# Fat-binary wrapper: magic 0x48495046, version 1, pointer to device blob.
	.type	__hip_fatbin_wrapper,@object    # @__hip_fatbin_wrapper
	.section	.hipFatBinSegment,"a",@progbits
	.p2align	3, 0x0
__hip_fatbin_wrapper:
	.long	1212764230                      # 0x48495046
	.long	1                               # 0x1
	.quad	__hip_fatbin
	.quad	0
	.size	__hip_fatbin_wrapper, 24
	.type	__hip_gpubin_handle,@object     # @__hip_gpubin_handle
	.local	__hip_gpubin_handle
	.comm	__hip_gpubin_handle,8,8
	.section	.init_array,"aw",@init_array
	.p2align	3, 0x0
	.quad	__hip_module_ctor               # run registration at startup
	.type	__hip_cuid_,@object             # @__hip_cuid_
	.bss
	.globl	__hip_cuid_
__hip_cuid_:
	.byte	0                               # 0x0
	.size	__hip_cuid_, 1
	.section	".linker-options","e",@llvm_linker_options
	.ident	"AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
	.section	".note.GNU-stack","",@progbits
	.addrsig
	.addrsig_sym _Z20__device_stub__test0Pf
	.addrsig_sym __hip_module_ctor
	.addrsig_sym __hip_module_dtor
	.addrsig_sym _Z5test0Pf
	.addrsig_sym __hip_fatbin
	.addrsig_sym __hip_fatbin_wrapper
	.addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* Rician MLE diffusion and kurtosis tensor estimator by Viljami Sairanen (2016)
Based on algorithm in:
"Liu, Jia, Dario Gasbarra, and Juha Railavo.
"Fast Estimation of Diffusion Tensors under
Rician noise by the EM algorithm."
Journal of neuroscience methods 257 (2016) : 147 - 158" */
// to convert between single and double precision use following changes:
// float <-> double
// sqrtf( <-> sqrt(
// fabsf( <-> fabs(
// expf( <-> exp(
// logf( <-> log(
#include <math.h>
// Flattens the 2-D (block, thread) coordinates into a single global
// thread index: threads in all preceding blocks plus the thread's
// row-major position within its own block.
__device__ size_t calculateGlobalIndex() {
	size_t const blockLinear  = blockIdx.x + blockIdx.y * gridDim.x;     // row-major block id
	size_t const threadLinear = threadIdx.x + blockDim.x * threadIdx.y;  // row-major thread id in block
	size_t const blockSize    = blockDim.x * blockDim.y;                 // threads per block
	return threadLinear + blockLinear * blockSize;
}
// Exponentially scaled modified Bessel function of order zero:
// returns I0(x) * exp(-|x|)  (cf. MATLAB besseli(0, x, 1)).
// Polynomial fits follow Abramowitz & Stegun 9.8.1/9.8.2 as used in
// Numerical Recipes' bessi0; the small-argument branch is multiplied by
// exp(-|x|) so both branches share the same scaling.
__device__ double getBesseli0(double x) {
	double ax, ans, y;
	ax = fabs(x);
	if (ax < 3.75) {
		// Polynomial in (x/3.75)^2, valid for |x| < 3.75.
		y = x / 3.75;
		y *= y;
		ans = 1.0 + y*(3.5156229 + y*(3.0899424 + y*(1.2067492 +
			y*(0.2659732 + y*(0.360768e-1 + y*0.45813e-2)))));
		ans *= exp(-ax); // scale by exp(-abs(real(x))); see matlab help for besseli
	}
	else {
		// Asymptotic fit in 3.75/|x|; the exp(|x|) prefactor of the
		// unscaled expansion cancels against the exp(-|x|) scaling.
		y = 3.75 / ax;
		ans = (1.0 / sqrt(ax)) * // scale by exp(-abs(real(x))); see matlab help for besseli
			(0.39894228 + y * (0.1328592e-1
			+ y * (0.225319e-2 + y * (-0.157565e-2 + y * (0.916281e-2
			+ y * (-0.2057706e-1 + y * (0.2635537e-1 + y * (-0.1647633e-1
			+ y * (0.392377e-2)))))))));
	}
	return ans;
}
// Exponentially scaled modified Bessel function of order one:
// returns I1(x) * exp(-|x|)  (cf. MATLAB besseli(1, x, 1)).
// Polynomial fits follow Abramowitz & Stegun 9.8.3/9.8.4 (Numerical
// Recipes' bessi1); I1 is an odd function, hence the final sign flip.
__device__ double getBesseli1(double x) {
	double ax, ans, y;
	ax = fabs(x);
	if (ax < 3.75) {
		// Polynomial in (x/3.75)^2 for small arguments.
		y = x / 3.75;
		y *= y;
		ans = ax * (0.5 + y *(0.87890594 + y *(0.51498869 + y *(0.15084934
			+ y * (0.2658733e-1 + y * (0.301532e-2 + y * 0.32411e-3))))));
		ans *= exp(-ax); // scale by exp(-abs(real(x))); see matlab help for besseli
	}
	else {
		// Asymptotic fit in 3.75/|x|, assembled in two stages.
		y = 3.75 / ax;
		ans = 0.2282967e-1 + y * (-0.2895312e-1 + y * (0.1787654e-1
			- y * 0.420059e-2));
		ans = 0.39894228 + y * (-0.3988024e-1 + y * (-0.362018e-2
			+ y * (0.163801e-2 + y * (-0.1031555e-1 + y * ans))));
		ans *= 1.0 / sqrt(ax); // scale by exp(-abs(real(x))); see matlab help for besseli
	}
	return x < 0.0 ? -ans : ans; // I1(-x) = -I1(x)
}
// Returns the largest of the `length` values in this thread's slice of
// `arr`; the slice starts at arr[THREAD * length].  Requires length >= 1.
__device__ double getMax(
	double *arr,
	const unsigned int length,
	size_t const THREAD) {
	double *slice = arr + THREAD * length;  // this thread's contiguous segment
	double best = slice[0];
	for (int k = 1; k < length; k++) {
		if (slice[k] > best) {
			best = slice[k];
		}
	}
	return best;
}
// Crout LU decomposition with implicit-pivoting row scaling (after
// Numerical Recipes "ludcmp"), applied in place to this thread's n x n
// row-major matrix at a[THREAD*n*n].
//
// a    in/out: matrix, replaced by its LU factors (rowwise permuted)
// n    matrix dimension
// indx out: indx[THREAD*n + j] = pivot row chosen for column j
// vv   scratch: per-row scale factors at vv[THREAD*n + i]
//
// Zero pivots are clamped to 1.0e-20 instead of aborting, preserving
// the original behaviour on degenerate voxels.
__device__ void LUdecomposition(double *a, int n, int *indx, double *vv, size_t const THREAD) {
	int i, imax = 0, j, k;
	double big, dum, sum, temp;
	// Record the reciprocal of each row's largest magnitude (implicit scaling).
	for (i = 0; i<n; i++) {
		big = 0.0;
		for (j = 0; j<n; j++) {
			temp = fabs(a[THREAD * n * n+ i*n + j]);
			if (temp >= big) {
				big = temp;
			}
		}
		if (big == 0.0) { // Singular matrix can't compute
			big = 1.0e-20;
		}
		vv[THREAD * n + i] = 1.0 / big;
	}
	// Crout's loop over columns.
	for (j = 0; j<n; j++) {
		for (i = 0; i<j; i++) {
			sum = a[THREAD * n * n+ i*n + j];
			for (k = 0; k<i; k++) {
				sum -= a[THREAD * n * n+ i*n + k] * a[THREAD * n * n+ k*n + j];
			}
			a[THREAD * n * n+ i*n + j] = sum;
		}
		// Search for the largest (scaled) pivot in column j.
		big = 0.0;
		for (i = j; i<n; i++) {
			sum = a[THREAD * n * n+ i*n + j];
			for (k = 0; k<j; k++) {
				sum -= a[THREAD * n * n+ i*n + k] * a[THREAD * n * n+ k*n + j];
			}
			a[THREAD * n * n+ i*n + j] = sum;
			dum = vv[THREAD * n+ i] * fabs(sum);
			if (dum >= big) {
				big = dum;
				imax = i;
			}
		}
		if (j != imax) {
			// Swap rows j and imax of THIS thread's matrix.
			// BUG FIX: the destination of the second assignment was missing
			// the THREAD*n*n offset, so the swap wrote into thread 0's
			// matrix and left this thread's row imax unchanged.
			for (k = 0; k<n; k++) {
				dum = a[THREAD * n * n+ imax*n + k];
				a[THREAD * n * n+ imax*n + k] = a[THREAD * n * n+ j*n + k];
				a[THREAD * n * n+ j*n + k] = dum;
			}
			vv[THREAD * n+ imax] = vv[THREAD * n+ j];
		}
		indx[THREAD * n+ j] = imax;
		if (a[THREAD * n * n+ j*n + j] == 0.0) {
			a[THREAD * n * n+ j*n + j] = 1.0e-20;
		}
		if (j != n - 1) { // no rows below the last pivot to scale
			dum = 1.0 / a[THREAD * n * n+ j*n + j];
			for (i = j + 1; i<n; i++) {
				a[THREAD * n * n+ i*n + j] *= dum;
			}
		}
	}
}
// Solves A x = b by forward and back substitution using the LU factors
// and row permutation produced by LUdecomposition (Numerical Recipes
// "lubksb"), on this thread's slices of a (n x n) and b (length n).
// The solution overwrites b[THREAD*n .. THREAD*n + n-1].
__device__ void LUsubstitutions(double *a, int n, int *indx, double *b, size_t const THREAD) {
	int i, ii = 0, ip, j;
	double sum;
	// Forward substitution, unscrambling the permutation as we go.
	// ii marks the first nonzero element of b so leading zeros are skipped.
	for (i = 0; i<n; i++) {
		ip = indx[(THREAD * n) + i];
		sum = b[(THREAD * n) + ip];
		b[(THREAD * n) + ip] = b[(THREAD * n) + i];
		if (ii != 0) {
			for (j = ii - 1; j<i; j++) {
				sum -= a[(THREAD * n * n) + (i * n) + j] * b[(THREAD * n) + j];
			}
		}
		else if (sum != 0) {
			ii = i + 1; // first nonzero encountered
		}
		b[(THREAD * n) + i] = sum;
	}
	// Back substitution with the upper-triangular factor.
	for (i = n - 1; i >= 0; i--) {
		sum = b[(THREAD * n) + i];
		for (j = i + 1; j<n; j++) {
			sum -= a[(THREAD * n * n) + (i * n) + j] * b[(THREAD * n) + j];
		}
		b[(THREAD * n) + i] = sum / a[(THREAD * n * n) + (i * n) + i];
	}
}
// Cholesky factorization A = L L^T of this thread's n x n symmetric
// matrix (Numerical Recipes "choldc").  Only the upper triangle of
// a[THREAD*n*n] is read; L's off-diagonal entries are written into the
// lower triangle and its diagonal into p[THREAD*n].  A non-positive
// pivot (matrix not positive definite) is clamped to 1.0e-20 instead of
// aborting.
__device__ void CholeskyDecomposition(double *a, int n, double *p, size_t const THREAD) {
	int i, j, k;
	double sum;
	for (i = 0; i < n; i++) {
		for (j = i; j < n; j++) {
			sum = a[(THREAD * n * n) + (i*n) + j];
			for (k = i-1; k >= 0; k--) {
				sum -= a[(THREAD * n * n) + (i*n) + k]
					* a[(THREAD * n * n) + (j*n) + k];
			}
			if (i == j) {
				if (sum <= 0.0) {
					sum = 1.0e-20; // Cholesky decomposition failed
				}
				p[THREAD*n + i] = sqrt(sum); // diagonal of L
			}
			else {
				a[(THREAD*n*n) + (j*n) + i] = sum / p[THREAD*n + i]; // below-diagonal entry of L
			}
		}
	}
}
// Solves A x = b given the factorization produced by
// CholeskyDecomposition (Numerical Recipes "cholsl"): the lower triangle
// of a[THREAD*n*n] holds L's off-diagonal entries and p[THREAD*n] its
// diagonal.  Forward-substitutes L y = b (storing y in x), then
// back-substitutes L^T x = y; x[THREAD*n .. THREAD*n + n-1] receives
// the solution.
__device__ void CholeskyBacksubstitution(double *a, int n, double *p, double *b, double *x, size_t const THREAD) {
	int i, k;
	double sum;
	for (i = 0; i < n; i++) { // Solve Ly=b, storing y in x
		sum = b[THREAD*n + i];
		for (k = i-1; k >= 0; k--) {
			sum -= a[(THREAD*n*n) + (i*n) + k] * x[THREAD*n + k];
		}
		x[THREAD*n + i] = sum / p[THREAD*n + i];
	}
	// BUG FIX: this sweep previously started at i = n, which read and
	// wrote x[THREAD*n + n] and divided by p[THREAD*n + n] — one element
	// past this thread's slice (cf. cholsl, which starts at n-1).
	for (i = n - 1; i >= 0; i--) { // Solve L^(T)x=y
		sum = x[THREAD*n + i];
		for (k = i+1; k < n; k++) {
			sum -= a[(THREAD*n*n) + (k*n) + i] * x[THREAD*n + k];
		}
		x[THREAD*n + i] = sum / p[THREAD*n + i];
	}
}
// For each measurement i, computes exp( sum_j Z[j][i] * theta[j] ) into
// this thread's slice of expZTheta.  The design matrix Z is stored
// column-per-parameter: element (j, i) lives at Z[j * nDWIs + i].
__device__ void calculateExpZTheta(
	double *expZTheta,
	double *theta,
	double *Z,
	const unsigned int nParams,
	const unsigned int nDWIs,
	size_t const THREAD) {
	for (int i = 0; i < nDWIs; i++) {
		double dot = 0.0;  // Z(:,i) . theta, accumulated in the same order as before
		for (int j = 0; j < nParams; j++) {
			dot += Z[j * nDWIs + i] * theta[THREAD * nParams + j];
		}
		expZTheta[THREAD * nDWIs + i] = exp(dot);
	}
}
// E-step helper:
//   a[THREAD]            = sumYSQ + sum_i model_i^2   (model = expZTheta slice)
//   b[THREAD*nDWIs + i]  = Y_i * model_i
__device__ void calculateAB_1(
	double *a,
	double *b,
	double *Y,
	double *expZTheta,
	double *sumYSQ,
	const unsigned int nDWIs,
	size_t const THREAD) {
	double acc = sumYSQ[THREAD];
	for (int i = 0; i < nDWIs; i++) {
		const double model = expZTheta[THREAD * nDWIs + i];
		acc += model * model;
		b[THREAD * nDWIs + i] = Y[THREAD * nDWIs + i] * model;
	}
	a[THREAD] = acc;
}
// Variant of calculateAB used when the baseline term theta(0) is handled
// separately: recomputes the model signal WITHOUT theta(0), then
//   a       = log( sum_i model_i^2 )
//   b_i     = Y_i * model_i
//   2tau_i  = b_i * exp(theta(0)) / SigmaSQ
// (2tau feeds the Bessel-ratio terms of the Rician EM updates.)
__device__ void calculateAB_2(
	double *a,
	double *b,
	double *Y,
	double *Z,
	double *theta,
	double *SigmaSQ,
	double *expZTheta,
	double *twotau,
	const unsigned int nDWIs,
	const unsigned int nParams,
	size_t const THREAD) {
	// Now indexing for i ranges [0, nDWIs-1] and j ranges [1, nParams] since first nParams is the theta(1)
	a[THREAD] = 0.0;
	for (int i = 0; i < nDWIs; i++) {
		expZTheta[THREAD * nDWIs + i] = 0.0;
		// Dot product of Z(:,i) with theta, skipping the baseline entry.
		for (int j = 1; j < nParams; j++) {
			expZTheta[THREAD * nDWIs + i] +=
				Z[j * nDWIs + i] * theta[THREAD * nParams + j];
		}
		expZTheta[THREAD * nDWIs + i] = exp(expZTheta[THREAD * nDWIs + i]);
		a[THREAD] += expZTheta[THREAD * nDWIs + i] * expZTheta[THREAD * nDWIs + i];
		b[THREAD * nDWIs + i] = Y[THREAD * nDWIs + i] * expZTheta[THREAD * nDWIs + i];
		twotau[THREAD * nDWIs + i] = b[THREAD * nDWIs + i] * exp(theta[THREAD * nParams+0]) / SigmaSQ[THREAD];
	}
	a[THREAD] = log(a[THREAD]); // log of the summed squared model signal
}
// Rician E-step expectation term for every measurement:
//   EN_i = 0.5 * 2tau_i * I1(2tau_i) / I0(2tau_i)
// (the exp(-|x|) scaling of getBesseli1/getBesseli0 cancels in the
// ratio).  anyEN[THREAD] reports whether any EN_i came out positive.
__device__ void calculateEN(
	double *EN,
	double *twotau,
	const unsigned int nDWIs,
	bool *anyEN,
	size_t const THREAD) {
	bool sawPositive = false;
	for (int i = 0; i < nDWIs; i++) {
		const double tt = twotau[THREAD * nDWIs + i];
		EN[THREAD * nDWIs + i] = 0.5 * tt * getBesseli1(tt) / getBesseli0(tt);
		if (EN[THREAD * nDWIs + i] > 0.0) {
			sawPositive = true;
		}
	}
	anyEN[THREAD] = sawPositive;
}
// For every measurement computes ZTheta_i = 2 * (Z(:,i) . theta[1..]) + c
// with the per-voxel constant c = 2*theta(0) - log(2*SigmaSQ); this is
// the argument later exponentiated in the likelihood terms.  j starts
// at 1 because theta(0) enters only through c.
__device__ void calculateZTheta(
	double *c,
	double *ZTheta,
	double *theta,
	double *SigmaSQ,
	double *Z,
	const unsigned int nDWIs,
	const unsigned int nParams,
	size_t const THREAD) {
	// Now indexing for i ranges [0, nDWIs-1] and j ranges [1, nParams] since first nParams is the theta(1)
	c[THREAD] = 2.0 * theta[THREAD * nParams+0] -
		log(2.0 * SigmaSQ[THREAD]);
	for (int i = 0; i < nDWIs; i++) {
		ZTheta[THREAD * nDWIs + i] = 0.0;
		for (int j = 1; j < nParams; j++) {
			ZTheta[THREAD * nDWIs + i] +=
				Z[j * nDWIs + i] * theta[THREAD * nParams + j];
		}
		ZTheta[THREAD * nDWIs + i] *= 2.0;
		ZTheta[THREAD * nDWIs + i] += c[THREAD];
	}
}
// Accumulates this voxel's (scaled) expected log-likelihood:
//   sum_i  EN_i * ZTheta_i  -  exp(ZTheta_i - scaling) * expScaling
// The shifted exponentials are cached in `expo` for later reuse.
__device__ void calculateLoglikelihood(
	double *loglikelihood,
	double *expo,
	double *ZTheta,
	double *scaling,
	double *expScaling,
	double *EN,
	const unsigned int nDWIs,
	size_t const THREAD) {
	double total = 0.0;
	for (int i = 0; i < nDWIs; i++) {
		const size_t idx = THREAD * nDWIs + i;
		expo[idx] = exp(ZTheta[idx] - scaling[THREAD]);  // overflow-guarded exponential
		total += EN[idx] * ZTheta[idx] - expo[idx] * expScaling[THREAD];
	}
	loglikelihood[THREAD] = total;
}
// Zero-fills this thread's nDeltaParams x nDeltaParams Fisher-information
// matrix and its symmetrized companion before accumulation.
__device__ void initializeInformationMatrices(
	double *fisherInformation,
	double *fisherInformation_sym,
	const unsigned int nDeltaParams,
	size_t const THREAD) {
	const unsigned int nElements = nDeltaParams * nDeltaParams;
	for (int k = 0; k < nElements; k++) {
		fisherInformation[THREAD * nElements + k] = 0.0;
		fisherInformation_sym[THREAD * nElements + k] = 0.0;
	}
}
// Fixed-point EM iteration for the Rician noise variance SigmaSQ:
//   2tau_i   = b_i / SigmaSQ
//   SigmaSQ' = 0.5 * a / ( nDWIs + sum_i 2tau_i * I1(2tau_i)/I0(2tau_i) )
// Repeats until the change drops to toleranceSigmaSQ or below, or
// iterLimitSigmaSQ passes have run; nIterSigmaSQ reports the count.
// SigmaSQ0 and tmpdouble are per-thread scratch slots.
__device__ void iterateSigmaSQ(
	double *SigmaSQ,
	double *SigmaSQ0,
	double *tmpdouble,
	double *a,
	double *b,
	double *twotau,
	unsigned int *nIterSigmaSQ,
	unsigned int iterLimitSigmaSQ,
	const double toleranceSigmaSQ,
	const unsigned int nDWIs,
	bool *continueSigmaSQIteration,
	size_t const THREAD) {
	// Should be ok
	continueSigmaSQIteration[THREAD] = true;
	nIterSigmaSQ[THREAD] = 0;
	while (continueSigmaSQIteration[THREAD]) {
		(nIterSigmaSQ[THREAD])++;
		SigmaSQ0[THREAD] = SigmaSQ[THREAD]; // remember previous estimate
		tmpdouble[THREAD] = 0.0;
		for (int i = 0; i < nDWIs; i++) {
			twotau[THREAD * nDWIs + i] = b[THREAD * nDWIs + i] / SigmaSQ[THREAD];
			// Bessel ratio I1/I0: the exp(-|x|) scaling of both cancels.
			tmpdouble[THREAD] += twotau[THREAD * nDWIs + i] *
				getBesseli1(twotau[THREAD * nDWIs + i]) /
				getBesseli0(twotau[THREAD * nDWIs + i]);
		}
		SigmaSQ[THREAD] = 0.5 * a[THREAD] / ((double)(nDWIs) + tmpdouble[THREAD]);
		// Stop on convergence or when the pass budget is exhausted.
		continueSigmaSQIteration[THREAD] =
			((nIterSigmaSQ[THREAD] < iterLimitSigmaSQ)
			&&
			(fabs(SigmaSQ[THREAD] - SigmaSQ0[THREAD]) > toleranceSigmaSQ));
	}
}
// Fixed-point EM iteration for the log-baseline parameter theta(0)
// (= log S0; cf. Liu, Gasbarra & Railavo 2016).  Each pass sets
//   theta(0) = log( sum_i b_i * I1(2tau_i)/I0(2tau_i) ) - a
// and refreshes 2tau_i = b_i * exp(theta(0)) / SigmaSQ.  Iteration
// stops after iterLimitS0 passes or when the relative change in
// theta(0) is no larger than toleranceS0.
__device__ void iterateS0(
	double *theta,
	double *theta1_old,
	double *SigmaSQ,
	double *a,
	double *b,
	double *twotau,
	unsigned int *nIterS0,
	unsigned int iterLimitS0,
	const double toleranceS0,
	const unsigned int nDWIs,
	const unsigned int nParams,
	bool *continueS0Iteration,
	size_t const THREAD) {
	continueS0Iteration[THREAD] = true;
	nIterS0[THREAD] = 0;
	while (continueS0Iteration[THREAD]) {
		nIterS0[THREAD]++;
		// Remember the current theta(0) to measure the change.
		theta1_old[THREAD] = theta[THREAD * nParams+0];
		// Recompute theta(0) from the Bessel-ratio weighted sum.
		theta[THREAD * nParams+0] = 0.0;
		for (int i = 0; i < nDWIs; i++) {
			theta[THREAD * nParams+0] += (b[THREAD * nDWIs + i] *
				getBesseli1(twotau[THREAD * nDWIs + i]) /
				getBesseli0(twotau[THREAD * nDWIs + i]));
		}
		theta[THREAD * nParams+0] = log(theta[THREAD * nParams+0]) -a[THREAD];
		// Update twotau for the next iteration step.
		for (int i = 0; i < nDWIs; i++) {
			twotau[THREAD * nDWIs + i] = b[THREAD * nDWIs + i] *
				exp(theta[THREAD * nParams+0]) / SigmaSQ[THREAD];
		}
		// Test to end the while loop.
		// BUG FIX: the relative change was previously used as a bare truth
		// value, so toleranceS0 was ignored and the loop only terminated
		// at iterLimitS0 (or on an exactly zero change).  Compare against
		// the tolerance, mirroring iterateSigmaSQ.
		continueS0Iteration[THREAD] =
			((nIterS0[THREAD] < iterLimitS0)
			&&
			(fabs((theta[THREAD * nParams + 0] - theta1_old[THREAD]) / theta1_old[THREAD]) > toleranceS0));
	}
}
__device__ void calculateFisherInformation(
	double *fisherInformation,
	double *fisherInformation_sym,
	double *Z,
	double *score,
	double *DeltaTheta,
	double *expo,
	double *EN,
	double *expScaling,
	const unsigned int nDWIs,
	const unsigned int nParams,
	const unsigned int nDeltaParams,
	size_t const THREAD) {
	// Builds, for this voxel, the score vector and the (nDeltaParams x
	// nDeltaParams) Fisher information matrix of theta(2:end); theta(1) is
	// optimized separately in iterateS0, hence the (j-1)/(k-1) index shifts.
	// Also seeds DeltaTheta with the score: it is the right-hand side of the
	// Newton system solved later by the Cholesky routines.
	for (int j = 1; j < nParams; j++) {
		score[THREAD * nDeltaParams + j - 1] = 0.0;
		for (int i = 0; i < nDWIs; i++) {
			// Score: 2 * Z_j^T (EN - exp(ZTheta)); the exponential is kept in
			// scaled form expo * expScaling for numerical safety.
			score[THREAD * nDeltaParams + j - 1] +=
				2.0 * Z[j * nDWIs + i] * (EN[THREAD * nDWIs + i] -
				expo[THREAD * nDWIs + i] * expScaling[THREAD]);
			for (int k = 1; k < nParams; k++) { // range of j and k are [1 to nParams]
				fisherInformation[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)] +=
					4.0 * Z[j * nDWIs + i] * Z[k * nDWIs + i] * expo[THREAD * nDWIs + i];
				// Symmetrize Fisher Information
				// NOTE(review): both operands below are the SAME element, so this
				// reduces to a copy scaled by expScaling. (A + A^T)/2 with the
				// second index pair transposed was presumably intended; the result
				// happens to be unchanged only because 4*Z_j*Z_k*expo is symmetric
				// in (j,k) by construction - confirm before altering.
				fisherInformation_sym[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)] =
					(fisherInformation[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)] +
					fisherInformation[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)]) *
					0.5 * expScaling[THREAD];
			}
		}
		DeltaTheta[THREAD * nDeltaParams + j - 1] = score[THREAD *nDeltaParams + j - 1];
	}
	// Make copy of symmetric Fisher information matrix
	for (int i = 0; i < nDeltaParams*nDeltaParams; i++) {
		fisherInformation[THREAD * nDeltaParams * nDeltaParams + i] = fisherInformation_sym[THREAD * nDeltaParams * nDeltaParams + i];
	}
}
__device__ void iterateLoglikelihood(
	int *indx,
	double *score,
	double *vv,
	double *DeltaTheta,
	double *Z,
	double *expo,
	double *theta,
	double *loglikelihood,
	double *loglikelihood_old,
	double *new_theta,
	double *regulatorLambda,
	double *fisherInformation,
	double *fisherInformation_sym,
	double *ZTheta,
	double *c,
	double *scaling,
	double *expScaling,
	double *EN,
	const unsigned int nDWIs,
	const unsigned int nParams,
	const unsigned int nDeltaParams,
	const double regulatorLambda0,
	const double regulatorRescaling,
	unsigned int *nIterLoglikelihood,
	const unsigned int iterLimitLoglikelihood,
	const double toleranceLoglikelihood,
	bool *continueLoglikelihoodIteration,
	size_t const THREAD) {
	// Levenberg-Marquardt-style inner loop for one Newton step on theta(2:end):
	// repeatedly solves (FisherInfo + lambda*I) * DeltaTheta = score with a
	// growing damping factor lambda until the proposed step no longer decreases
	// the loglikelihood (or the iteration limit is hit). Candidate parameters
	// are written to new_theta; theta itself is updated by the caller
	// (iterateTheta) only when the step is accepted.
	// NOTE(review): toleranceLoglikelihood is accepted but not used in the
	// stopping test below - confirm whether that is intentional.
	nIterLoglikelihood[THREAD] = 0;
	continueLoglikelihoodIteration[THREAD] = true;
	regulatorLambda[THREAD] = regulatorLambda0;
	while (continueLoglikelihoodIteration[THREAD]) {
		nIterLoglikelihood[THREAD]++;
		//loglikelihood_old[THREAD] = loglikelihood[THREAD]; // loglikelihood_old is not supposed to be updated in this loop
		// Initialize DeltaTheta for LUdecomposition & substitutions
		// because X = I\score calculated using LUsubstitutions actually
		// replaces values in score and we don't want to loose that information
		// so we have to save score into DeltaTheta variable
		for (int j = 1; j < nParams; j++) {
			DeltaTheta[THREAD * nDeltaParams + j - 1] = score[THREAD *nDeltaParams + j - 1];
		}
		// Regularize Fisher information matrix with lambda (add lambda to the diagonal)
		for (int i = 0; i < nDeltaParams; i++) {
			fisherInformation[THREAD * nDeltaParams*nDeltaParams + i*nDeltaParams + i] =
				fisherInformation_sym[THREAD * nDeltaParams*nDeltaParams + i*nDeltaParams + i]
				+ regulatorLambda[THREAD];
		}
		// Update regulatorLambda: next pass (if needed) is damped more strongly
		regulatorLambda[THREAD] *= regulatorRescaling;
		//LUdecomposition(fisherInformation, nDeltaParams, indx, vv, THREAD);
		//LUsubstitutions(fisherInformation, nDeltaParams, indx, DeltaTheta, THREAD);
		// Solve the regularized Newton system for DeltaTheta
		CholeskyDecomposition(fisherInformation, nDeltaParams, vv, THREAD);
		CholeskyBacksubstitution(fisherInformation, nDeltaParams, vv, score, DeltaTheta, THREAD);
		//goto THE_END_LOGLIKELIHOOD;
		// Calculate new theta(2:end)
		for (int i = 1; i < nParams; i++) {
			new_theta[THREAD * nDeltaParams + i - 1] =
				theta[THREAD * nParams + i]
				+ DeltaTheta[THREAD * nDeltaParams + i - 1];
		}
		// Calculate ZTheta based on new_theta
		for (int i = 0; i < nDWIs; i++) {
			ZTheta[THREAD * nDWIs + i] = 0.0;
			for (int j = 1; j < nParams; j++) {
				ZTheta[THREAD * nDWIs + i] +=
					Z[j* nDWIs + i] * new_theta[THREAD * nDeltaParams + j - 1];
			}
			ZTheta[THREAD * nDWIs + i] *= 2.0;
			ZTheta[THREAD * nDWIs + i] += c[THREAD]; // c is based on theta(1) and sigmasq that are constant in this loop
		}
		// Refresh the numerical-stability scaling for the exp() calls
		scaling[THREAD] = getMax(ZTheta, nDWIs, THREAD);
		expScaling[THREAD] = exp(scaling[THREAD]);
		// Calculate new loglikelihood
		// calculateLoglikelihood updates loglikelihood and expo variables
		calculateLoglikelihood(loglikelihood, expo, ZTheta, scaling, expScaling, EN, nDWIs, THREAD);
		// Check if new loglikelihood is NaN, if so more regulation is needed
		// (f != f) is true only if f is NaN (IEEE standard)
		if (loglikelihood[THREAD] != loglikelihood[THREAD]) {
			// loglikelihood is NaN, check only iterations
			continueLoglikelihoodIteration[THREAD] = (nIterLoglikelihood[THREAD] < iterLimitLoglikelihood);
		}
		else {
			// Keep damping harder while the step still worsens the loglikelihood
			continueLoglikelihoodIteration[THREAD] =
				((loglikelihood[THREAD] < loglikelihood_old[THREAD])
				&&
				(nIterLoglikelihood[THREAD] < iterLimitLoglikelihood));
		}
	}
	//THE_END_LOGLIKELIHOOD:
}
__device__ void iterateTheta(
	int *indx,
	double *vv,
	double *theta,
	double *ZTheta,
	double *c,
	double *fisherInformation,
	double *fisherInformation_sym,
	double *score,
	double *Z,
	double *EN,
	double *scaling,
	double *expScaling,
	double *expo,
	double *DeltaTheta,
	double *DeltaThetaScore,
	double *new_theta,
	double *loglikelihood,
	double *loglikelihood_old,
	double *regulatorLambda,
	const double regulatorLambda0,
	const double regulatorRescaling,
	const unsigned int nDWIs,
	const unsigned int nParams,
	const unsigned int nDeltaParams,
	unsigned int *nIterTheta,
	unsigned int *nIterLoglikelihood,
	const unsigned int iterLimitTheta,
	const unsigned int iterLimitLoglikelihood,
	const double toleranceTheta,
	const double toleranceLoglikelihood,
	bool *continueThetaIteration,
	bool *continueLoglikelihoodIteration,
	size_t const THREAD) {
	// Outer Newton loop over the tensor parameters theta(2:end) for one voxel,
	// with theta(1) and SigmaSQ held fixed. Each pass rebuilds the score /
	// Fisher information, runs the damped inner loop (iterateLoglikelihood)
	// to find an acceptable step, then accepts it unless the resulting
	// loglikelihood is NaN.
	// Now indexing for i ranges [0, nDWIs-1] and j ranges [1, nParams] since first nParams is the theta(1)
	continueThetaIteration[THREAD] = true;
	nIterTheta[THREAD] = 0;
	loglikelihood_old[THREAD] = loglikelihood[THREAD];
	while (continueThetaIteration[THREAD]) {
		nIterTheta[THREAD]++;
		calculateFisherInformation(fisherInformation, fisherInformation_sym, Z, score, DeltaTheta, expo, EN, expScaling, nDWIs, nParams, nDeltaParams, THREAD);
		// Optimize loglikelihood
		iterateLoglikelihood(indx, score, vv, DeltaTheta, Z, expo, theta, loglikelihood, loglikelihood_old, new_theta, regulatorLambda, fisherInformation, fisherInformation_sym, ZTheta, c, scaling, expScaling, EN, nDWIs, nParams, nDeltaParams, regulatorLambda0, regulatorRescaling, nIterLoglikelihood, iterLimitLoglikelihood, toleranceLoglikelihood, continueLoglikelihoodIteration, THREAD);
		//goto THE_END_THETA;
		// Directional derivative: DeltaTheta . score, used as a convergence measure
		DeltaThetaScore[THREAD] = 0.0;
		for (int i = 0; i < nDeltaParams; i++) {
			DeltaThetaScore[THREAD] += DeltaTheta[THREAD * nDeltaParams + i]
				* score[THREAD * nDeltaParams + i];
		}
		// Check if new loglikelihood is NaN, if not
		// update theta(2:end) and loglikelihood_old
		if (loglikelihood[THREAD] != loglikelihood[THREAD]) {
			// NaN, don't update variables
			continueThetaIteration[THREAD] = (nIterTheta[THREAD] < iterLimitTheta);
		} else {
			// Accept the step proposed by the inner loop
			for (int i = 1; i < nParams; i++) {
				theta[THREAD * nParams + i] = new_theta[THREAD * nDeltaParams + i - 1];
			}
			// NOTE(review): loglikelihood_old is refreshed here, so the
			// (loglikelihood - loglikelihood_old) term in the test below is
			// always exactly 0 and that disjunct can never fire; only
			// DeltaThetaScore and the iteration count decide continuation.
			// Confirm whether the update was meant to happen after the test.
			loglikelihood_old[THREAD] = loglikelihood[THREAD];
			continueThetaIteration[THREAD] =
				(((DeltaThetaScore[THREAD] > toleranceTheta)
				||
				((loglikelihood[THREAD] - loglikelihood_old[THREAD]) > toleranceLoglikelihood))
				&&
				(nIterTheta[THREAD] < iterLimitTheta));
		}
	}
	//THE_END_THETA:
}
__device__ void calculateNorms(
	double *norm1,
	double *norm2,
	double *theta,
	double *theta_old,
	const unsigned int nParams,
	size_t const THREAD) {
	// For this voxel:
	//   norm1 = ||theta_old||_2            (magnitude of the previous estimate)
	//   norm2 = ||theta - theta_old||_2    (magnitude of the update)
	// Their ratio norm2/norm1 is the relative parameter change used by the
	// caller's convergence test.
	size_t const base = THREAD * nParams;
	norm1[THREAD] = 0.0;
	norm2[THREAD] = 0.0;
	for (int k = 0; k < nParams; k++) {
		double const prev = theta_old[base + k];
		double const delta = theta[base + k] - prev;
		norm1[THREAD] += prev * prev;
		norm2[THREAD] += delta * delta;
	}
	norm1[THREAD] = sqrt(norm1[THREAD]);
	norm2[THREAD] = sqrt(norm2[THREAD]);
}
__global__ void RicianMLE(
	double *theta,               // [nVoxels*nParams] in/out: log S0 + tensor parameters
	double *SigmaSQ,             // [nVoxels] in/out: Rician noise variance
	double *Z,                   // [nParams*nDWIs] design matrix (shared by all voxels)
	double *fisherInformation,   // workspace, [nVoxels*nDeltaParams^2]
	double *fisherInformation_sym,
	double *score,
	double *DeltaTheta,
	double *new_theta,
	double *vv,
	int *indx,
	double *theta_old,
	double *Y,                   // [nVoxels*nDWIs] measured DWI signals
	double *expZTheta,
	double *ZTheta,
	double *twotau,
	double *expo,
	double *EN,                  // conditional expectations (E-step)
	double *b,
	double *a,
	double *c,
	double *sumYSQ,
	double *theta1_old,
	double *SigmaSQ0,
	double *SigmaSQ_old,
	double *tmpdouble,
	double *scaling,
	double *expScaling,
	double *loglikelihood,
	double *loglikelihood_old,
	double *regulatorLambda,
	double *DeltaThetaScore,
	double *norm1,
	double *norm2,
	unsigned int *nIterSigmaSQ,
	unsigned int *nIterVoxel,
	unsigned int *nIterS0,
	unsigned int *nIterTheta,
	unsigned int *nIterLoglikelihood,
	bool *continueSigmaSQIteration,
	bool *continueVoxelIteration,
	bool *continueS0Iteration,
	bool *continueThetaIteration,
	bool *continueLoglikelihoodIteration,
	bool *anyEN,
	const double toleranceSigmaSQ,
	const double toleranceS0,
	const double toleranceTheta,
	const double toleranceLoglikelihood,
	const unsigned int iterLimitSigmaSQ,
	const unsigned int iterLimitVoxel,
	const unsigned int iterLimitS0,
	const unsigned int iterLimitTheta,
	const unsigned int iterLimitLoglikelihood,
	const double regulatorLambda0,
	const double regulatorRescaling,
	const unsigned int nDWIs,
	const unsigned int nParams,
	const unsigned int nDeltaParams,
	const unsigned int nVoxels) {
	// One thread == one voxel. Per-voxel EM estimation of the Rician-noise
	// diffusion/kurtosis model: alternately optimizes SigmaSQ, theta(1)
	// (log S0) and theta(2:end) until the voxel-level convergence test passes.
	// All working arrays are pre-allocated by the host and indexed by THREAD.
	// Initial, work out which THREAD i.e. voxel we are computing
	size_t const THREAD = calculateGlobalIndex();
	if (THREAD >= nVoxels) {
		return;
	}
	// First, optimize Rician loglikelihood w.r.t. SigmaSQ
	calculateExpZTheta( expZTheta, theta, Z, nParams, nDWIs, THREAD);
	calculateAB_1(a, b, Y, expZTheta, sumYSQ, nDWIs, THREAD);
	iterateSigmaSQ(SigmaSQ, SigmaSQ0, tmpdouble, a, b, twotau, nIterSigmaSQ, iterLimitSigmaSQ, toleranceSigmaSQ, nDWIs, continueSigmaSQIteration, THREAD);
	// Start voxel-wise optimization
	continueVoxelIteration[THREAD] = true;
	while (continueVoxelIteration[THREAD]) {
		// NOTE(review): nIterVoxel is incremented without being reset here -
		// presumably the host zero-initializes it before launch; confirm.
		nIterVoxel[THREAD]++;
		// Save initial theta and SigmaSQ to be used later to test if voxel optimization continues
		SigmaSQ_old[THREAD] = SigmaSQ[THREAD];
		for (int i = 0; i < nParams; i++) {
			theta_old[THREAD * nParams + i] = theta[THREAD * nParams + i];
		}
		// Second, optimize w.r.t. S0 i.e. theta(1) with fixed theta(2:end) and SigmaSQ
		// calcuateAB_2 updates a,b, expZTheta, and twotau variables
		calculateAB_2(a, b, Y, Z, theta, SigmaSQ, expZTheta, twotau, nDWIs, nParams, THREAD);
		// iterateS0 updates theta(1) and twotau variables
		iterateS0(theta, theta1_old, SigmaSQ, a, b, twotau, nIterS0, iterLimitS0, toleranceS0, nDWIs, nParams, continueS0Iteration, THREAD);
		// Third, optimize w.r.t. theta(2:end) with fixed theta(1) and SigmaSQ
		// calculateEN updates conditional expectation EN and checks if any(EN > 0)
		calculateEN(EN, twotau, nDWIs, anyEN, THREAD);
		if (anyEN[THREAD]) {
			// There is information to estimate tensor(s)
			// calculateZTheta updates c and ZTheta variables
			calculateZTheta(c, ZTheta, theta, SigmaSQ, Z, nDWIs, nParams, THREAD);
			scaling[THREAD] = getMax(ZTheta, nDWIs, THREAD);
			expScaling[THREAD] = exp(scaling[THREAD]);
			// calculateLoglikelihood updates loglikelihood and expo variables
			calculateLoglikelihood(loglikelihood, expo, ZTheta, scaling, expScaling, EN, nDWIs, THREAD);
			initializeInformationMatrices(fisherInformation, fisherInformation_sym, nDeltaParams, THREAD);
			iterateTheta(indx, vv, theta, ZTheta, c, fisherInformation, fisherInformation_sym, score, Z, EN, scaling, expScaling, expo, DeltaTheta, DeltaThetaScore, new_theta, loglikelihood, loglikelihood_old, regulatorLambda, regulatorLambda0, regulatorRescaling, nDWIs, nParams, nDeltaParams, nIterTheta, nIterLoglikelihood, iterLimitTheta, iterLimitLoglikelihood, toleranceTheta, toleranceLoglikelihood, continueThetaIteration, continueLoglikelihoodIteration, THREAD);
			//goto THE_END;
		}
		else {
			// There is no information for estimations
			// Set theta(2:end) and information to zero
			for (int i = 1; i < nParams; i++) {
				theta[THREAD * nParams + i] = 0.0;
			}
			initializeInformationMatrices(fisherInformation, fisherInformation_sym, nDeltaParams, THREAD);
		}
		// Last, optimize w.r.t. SigmaSQ with fixed theta
		calculateExpZTheta(expZTheta, theta, Z, nParams, nDWIs, THREAD);
		calculateAB_1(a, b, Y, expZTheta, sumYSQ, nDWIs, THREAD);
		iterateSigmaSQ(SigmaSQ, SigmaSQ0, tmpdouble, a, b, twotau, nIterSigmaSQ, iterLimitSigmaSQ, toleranceSigmaSQ, nDWIs, continueSigmaSQIteration, THREAD);
		calculateNorms(norm1, norm2, theta, theta_old, nParams, THREAD);
		// Continue while either sigma^2 or theta still changed relatively more
		// than its tolerance, up to iterLimitVoxel passes
		continueVoxelIteration[THREAD] =
			(((fabs((SigmaSQ[THREAD] - SigmaSQ_old[THREAD]) / SigmaSQ_old[THREAD]) > toleranceSigmaSQ)
			||
			((norm2[THREAD] / norm1[THREAD]) > toleranceTheta))
			&&
			(nIterVoxel[THREAD] < iterLimitVoxel));
	}
	//THE_END:
}
.text
#APP
#NO_APP
# atexit handler emitted by nvcc: unregisters this translation unit's fat
# binary (device code image) from the CUDA runtime at program exit.
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2050:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                # realign rsp to 16 before the call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2050:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# ---------------------------------------------------------------------------
# Host-side trap stubs emitted by nvcc for the __device__-only functions of
# RicianMLE_double.cu. These symbols exist only so the host object links; the
# real code runs on the GPU. If one is ever called from host code it stores 1
# into a stack slot, loads it into edi and calls exit(1). All 21 stubs below
# are byte-for-byte identical apart from their mangled names.
# ---------------------------------------------------------------------------
	.globl	_Z20calculateGlobalIndexv
	.type	_Z20calculateGlobalIndexv, @function
_Z20calculateGlobalIndexv:
.LFB2027:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2027:
	.size	_Z20calculateGlobalIndexv, .-_Z20calculateGlobalIndexv
	.globl	_Z11getBesseli0d
	.type	_Z11getBesseli0d, @function
_Z11getBesseli0d:
.LFB2028:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2028:
	.size	_Z11getBesseli0d, .-_Z11getBesseli0d
	.globl	_Z11getBesseli1d
	.type	_Z11getBesseli1d, @function
_Z11getBesseli1d:
.LFB2029:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2029:
	.size	_Z11getBesseli1d, .-_Z11getBesseli1d
	.globl	_Z6getMaxPdjm
	.type	_Z6getMaxPdjm, @function
_Z6getMaxPdjm:
.LFB2030:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2030:
	.size	_Z6getMaxPdjm, .-_Z6getMaxPdjm
	.globl	_Z15LUdecompositionPdiPiS_m
	.type	_Z15LUdecompositionPdiPiS_m, @function
_Z15LUdecompositionPdiPiS_m:
.LFB2031:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2031:
	.size	_Z15LUdecompositionPdiPiS_m, .-_Z15LUdecompositionPdiPiS_m
	.globl	_Z15LUsubstitutionsPdiPiS_m
	.type	_Z15LUsubstitutionsPdiPiS_m, @function
_Z15LUsubstitutionsPdiPiS_m:
.LFB2032:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2032:
	.size	_Z15LUsubstitutionsPdiPiS_m, .-_Z15LUsubstitutionsPdiPiS_m
	.globl	_Z21CholeskyDecompositionPdiS_m
	.type	_Z21CholeskyDecompositionPdiS_m, @function
_Z21CholeskyDecompositionPdiS_m:
.LFB2033:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2033:
	.size	_Z21CholeskyDecompositionPdiS_m, .-_Z21CholeskyDecompositionPdiS_m
	.globl	_Z24CholeskyBacksubstitutionPdiS_S_S_m
	.type	_Z24CholeskyBacksubstitutionPdiS_S_S_m, @function
_Z24CholeskyBacksubstitutionPdiS_S_S_m:
.LFB2034:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2034:
	.size	_Z24CholeskyBacksubstitutionPdiS_S_S_m, .-_Z24CholeskyBacksubstitutionPdiS_S_S_m
	.globl	_Z18calculateExpZThetaPdS_S_jjm
	.type	_Z18calculateExpZThetaPdS_S_jjm, @function
_Z18calculateExpZThetaPdS_S_jjm:
.LFB2035:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2035:
	.size	_Z18calculateExpZThetaPdS_S_jjm, .-_Z18calculateExpZThetaPdS_S_jjm
	.globl	_Z13calculateAB_1PdS_S_S_S_jm
	.type	_Z13calculateAB_1PdS_S_S_S_jm, @function
_Z13calculateAB_1PdS_S_S_S_jm:
.LFB2036:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2036:
	.size	_Z13calculateAB_1PdS_S_S_S_jm, .-_Z13calculateAB_1PdS_S_S_S_jm
	.globl	_Z13calculateAB_2PdS_S_S_S_S_S_S_jjm
	.type	_Z13calculateAB_2PdS_S_S_S_S_S_S_jjm, @function
_Z13calculateAB_2PdS_S_S_S_S_S_S_jjm:
.LFB2037:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2037:
	.size	_Z13calculateAB_2PdS_S_S_S_S_S_S_jjm, .-_Z13calculateAB_2PdS_S_S_S_S_S_S_jjm
	.globl	_Z11calculateENPdS_jPbm
	.type	_Z11calculateENPdS_jPbm, @function
_Z11calculateENPdS_jPbm:
.LFB2038:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2038:
	.size	_Z11calculateENPdS_jPbm, .-_Z11calculateENPdS_jPbm
	.globl	_Z15calculateZThetaPdS_S_S_S_jjm
	.type	_Z15calculateZThetaPdS_S_S_S_jjm, @function
_Z15calculateZThetaPdS_S_S_S_jjm:
.LFB2039:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2039:
	.size	_Z15calculateZThetaPdS_S_S_S_jjm, .-_Z15calculateZThetaPdS_S_S_S_jjm
	.globl	_Z22calculateLoglikelihoodPdS_S_S_S_S_jm
	.type	_Z22calculateLoglikelihoodPdS_S_S_S_S_jm, @function
_Z22calculateLoglikelihoodPdS_S_S_S_S_jm:
.LFB2040:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2040:
	.size	_Z22calculateLoglikelihoodPdS_S_S_S_S_jm, .-_Z22calculateLoglikelihoodPdS_S_S_S_S_jm
	.globl	_Z29initializeInformationMatricesPdS_jm
	.type	_Z29initializeInformationMatricesPdS_jm, @function
_Z29initializeInformationMatricesPdS_jm:
.LFB2041:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2041:
	.size	_Z29initializeInformationMatricesPdS_jm, .-_Z29initializeInformationMatricesPdS_jm
	.globl	_Z14iterateSigmaSQPdS_S_S_S_S_PjjdjPbm
	.type	_Z14iterateSigmaSQPdS_S_S_S_S_PjjdjPbm, @function
_Z14iterateSigmaSQPdS_S_S_S_S_PjjdjPbm:
.LFB2042:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2042:
	.size	_Z14iterateSigmaSQPdS_S_S_S_S_PjjdjPbm, .-_Z14iterateSigmaSQPdS_S_S_S_S_PjjdjPbm
	.globl	_Z9iterateS0PdS_S_S_S_S_PjjdjjPbm
	.type	_Z9iterateS0PdS_S_S_S_S_PjjdjjPbm, @function
_Z9iterateS0PdS_S_S_S_S_PjjdjjPbm:
.LFB2043:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2043:
	.size	_Z9iterateS0PdS_S_S_S_S_PjjdjjPbm, .-_Z9iterateS0PdS_S_S_S_S_PjjdjjPbm
	.globl	_Z26calculateFisherInformationPdS_S_S_S_S_S_S_jjjm
	.type	_Z26calculateFisherInformationPdS_S_S_S_S_S_S_jjjm, @function
_Z26calculateFisherInformationPdS_S_S_S_S_S_S_jjjm:
.LFB2044:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2044:
	.size	_Z26calculateFisherInformationPdS_S_S_S_S_S_S_jjjm, .-_Z26calculateFisherInformationPdS_S_S_S_S_S_S_jjjm
	.globl	_Z20iterateLoglikelihoodPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_jjjddPjjdPbm
	.type	_Z20iterateLoglikelihoodPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_jjjddPjjdPbm, @function
_Z20iterateLoglikelihoodPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_jjjddPjjdPbm:
.LFB2045:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2045:
	.size	_Z20iterateLoglikelihoodPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_jjjddPjjdPbm, .-_Z20iterateLoglikelihoodPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_jjjddPjjdPbm
	.globl	_Z12iterateThetaPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_ddjjjPjS1_jjddPbS2_m
	.type	_Z12iterateThetaPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_ddjjjPjS1_jjddPbS2_m, @function
_Z12iterateThetaPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_ddjjjPjS1_jjddPbS2_m:
.LFB2046:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2046:
	.size	_Z12iterateThetaPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_ddjjjPjS1_jjddPbS2_m, .-_Z12iterateThetaPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_ddjjjPjS1_jjddPbS2_m
	.globl	_Z14calculateNormsPdS_S_S_jm
	.type	_Z14calculateNormsPdS_S_S_jm, @function
_Z14calculateNormsPdS_S_S_jm:
.LFB2047:
	.cfi_startproc
	endbr64
	pushq	%rax
	.cfi_def_cfa_offset 16
	popq	%rax
	.cfi_def_cfa_offset 8
	subq	$24, %rsp
	.cfi_def_cfa_offset 32
	movl	$1, 12(%rsp)
	movl	12(%rsp), %edi
	call	exit@PLT
	.cfi_endproc
.LFE2047:
	.size	_Z14calculateNormsPdS_S_S_jm, .-_Z14calculateNormsPdS_S_S_jm
# ---------------------------------------------------------------------------
# Kernel launch stub generated by nvcc for RicianMLE. It spills all 62 kernel
# arguments (6 register ints/ptrs, 6 xmm doubles, the rest from the caller's
# stack) into local slots, builds the cudaLaunchKernel argument-pointer array,
# pops the <<<grid, block, shmem, stream>>> launch configuration and forwards
# everything to cudaLaunchKernel. Stack canary checked on exit.
# ---------------------------------------------------------------------------
	.globl	_Z138__device_stub__Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjjPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
	.type	_Z138__device_stub__Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjjPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj, @function
_Z138__device_stub__Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjjPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj:
.LFB2072:
	.cfi_startproc
	endbr64
	subq	$952, %rsp
	.cfi_def_cfa_offset 960
# Spill the six integer/pointer register arguments
	movq	%rdi, 392(%rsp)
	movq	%rsi, 384(%rsp)
	movq	%rdx, 376(%rsp)
	movq	%rcx, 368(%rsp)
	movq	%r8, 360(%rsp)
	movq	%r9, 352(%rsp)
# Spill the six double register arguments
	movsd	%xmm0, 40(%rsp)
	movsd	%xmm1, 32(%rsp)
	movsd	%xmm2, 24(%rsp)
	movsd	%xmm3, 16(%rsp)
	movsd	%xmm4, 8(%rsp)
	movsd	%xmm5, (%rsp)
# Copy the stack-passed arguments into local slots
	movq	960(%rsp), %rax
	movq	%rax, 344(%rsp)
	movq	968(%rsp), %rax
	movq	%rax, 336(%rsp)
	movq	976(%rsp), %rax
	movq	%rax, 328(%rsp)
	movq	984(%rsp), %rax
	movq	%rax, 320(%rsp)
	movq	992(%rsp), %rax
	movq	%rax, 312(%rsp)
	movq	1000(%rsp), %rax
	movq	%rax, 304(%rsp)
	movq	1008(%rsp), %rax
	movq	%rax, 296(%rsp)
	movq	1016(%rsp), %rax
	movq	%rax, 288(%rsp)
	movq	1024(%rsp), %rax
	movq	%rax, 280(%rsp)
	movq	1032(%rsp), %rax
	movq	%rax, 272(%rsp)
	movq	1040(%rsp), %rax
	movq	%rax, 264(%rsp)
	movq	1048(%rsp), %rax
	movq	%rax, 256(%rsp)
	movq	1056(%rsp), %rax
	movq	%rax, 248(%rsp)
	movq	1064(%rsp), %rax
	movq	%rax, 240(%rsp)
	movq	1072(%rsp), %rax
	movq	%rax, 232(%rsp)
	movq	1080(%rsp), %rax
	movq	%rax, 224(%rsp)
	movq	1088(%rsp), %rax
	movq	%rax, 216(%rsp)
	movq	1096(%rsp), %rax
	movq	%rax, 208(%rsp)
	movq	1104(%rsp), %rax
	movq	%rax, 200(%rsp)
	movq	1112(%rsp), %rax
	movq	%rax, 192(%rsp)
	movq	1120(%rsp), %rax
	movq	%rax, 184(%rsp)
	movq	1128(%rsp), %rax
	movq	%rax, 176(%rsp)
	movq	1136(%rsp), %rax
	movq	%rax, 168(%rsp)
	movq	1144(%rsp), %rax
	movq	%rax, 160(%rsp)
	movq	1152(%rsp), %rax
	movq	%rax, 152(%rsp)
	movq	1160(%rsp), %rax
	movq	%rax, 144(%rsp)
	movq	1168(%rsp), %rax
	movq	%rax, 136(%rsp)
	movq	1176(%rsp), %rax
	movq	%rax, 128(%rsp)
	movq	1184(%rsp), %rax
	movq	%rax, 120(%rsp)
	movq	1192(%rsp), %rax
	movq	%rax, 112(%rsp)
	movq	1200(%rsp), %rax
	movq	%rax, 104(%rsp)
	movq	1208(%rsp), %rax
	movq	%rax, 96(%rsp)
	movq	1216(%rsp), %rax
	movq	%rax, 88(%rsp)
	movq	1224(%rsp), %rax
	movq	%rax, 80(%rsp)
	movq	1232(%rsp), %rax
	movq	%rax, 72(%rsp)
	movq	1240(%rsp), %rax
	movq	%rax, 64(%rsp)
	movq	1248(%rsp), %rax
	movq	%rax, 56(%rsp)
	movq	1256(%rsp), %rax
	movq	%rax, 48(%rsp)
# Install the stack-protector canary
	movq	%fs:40, %rax
	movq	%rax, 936(%rsp)
	xorl	%eax, %eax
# Build the argument-pointer array expected by cudaLaunchKernel
	leaq	392(%rsp), %rax
	movq	%rax, 464(%rsp)
	leaq	384(%rsp), %rax
	movq	%rax, 472(%rsp)
	leaq	376(%rsp), %rax
	movq	%rax, 480(%rsp)
	leaq	368(%rsp), %rax
	movq	%rax, 488(%rsp)
	leaq	360(%rsp), %rax
	movq	%rax, 496(%rsp)
	leaq	352(%rsp), %rax
	movq	%rax, 504(%rsp)
	leaq	344(%rsp), %rax
	movq	%rax, 512(%rsp)
	leaq	336(%rsp), %rax
	movq	%rax, 520(%rsp)
	leaq	328(%rsp), %rax
	movq	%rax, 528(%rsp)
	leaq	320(%rsp), %rax
	movq	%rax, 536(%rsp)
	leaq	312(%rsp), %rax
	movq	%rax, 544(%rsp)
	leaq	304(%rsp), %rax
	movq	%rax, 552(%rsp)
	leaq	296(%rsp), %rax
	movq	%rax, 560(%rsp)
	leaq	288(%rsp), %rax
	movq	%rax, 568(%rsp)
	leaq	280(%rsp), %rax
	movq	%rax, 576(%rsp)
	leaq	272(%rsp), %rax
	movq	%rax, 584(%rsp)
	leaq	264(%rsp), %rax
	movq	%rax, 592(%rsp)
	leaq	256(%rsp), %rax
	movq	%rax, 600(%rsp)
	leaq	248(%rsp), %rax
	movq	%rax, 608(%rsp)
	leaq	240(%rsp), %rax
	movq	%rax, 616(%rsp)
	leaq	232(%rsp), %rax
	movq	%rax, 624(%rsp)
	leaq	224(%rsp), %rax
	movq	%rax, 632(%rsp)
	leaq	216(%rsp), %rax
	movq	%rax, 640(%rsp)
	leaq	208(%rsp), %rax
	movq	%rax, 648(%rsp)
	leaq	200(%rsp), %rax
	movq	%rax, 656(%rsp)
	leaq	192(%rsp), %rax
	movq	%rax, 664(%rsp)
	leaq	184(%rsp), %rax
	movq	%rax, 672(%rsp)
	leaq	176(%rsp), %rax
	movq	%rax, 680(%rsp)
	leaq	168(%rsp), %rax
	movq	%rax, 688(%rsp)
	leaq	160(%rsp), %rax
	movq	%rax, 696(%rsp)
	leaq	152(%rsp), %rax
	movq	%rax, 704(%rsp)
	leaq	144(%rsp), %rax
	movq	%rax, 712(%rsp)
	leaq	136(%rsp), %rax
	movq	%rax, 720(%rsp)
	leaq	128(%rsp), %rax
	movq	%rax, 728(%rsp)
	leaq	120(%rsp), %rax
	movq	%rax, 736(%rsp)
	leaq	112(%rsp), %rax
	movq	%rax, 744(%rsp)
	leaq	104(%rsp), %rax
	movq	%rax, 752(%rsp)
	leaq	96(%rsp), %rax
	movq	%rax, 760(%rsp)
	leaq	88(%rsp), %rax
	movq	%rax, 768(%rsp)
	leaq	80(%rsp), %rax
	movq	%rax, 776(%rsp)
	leaq	72(%rsp), %rax
	movq	%rax, 784(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 792(%rsp)
	leaq	56(%rsp), %rax
	movq	%rax, 800(%rsp)
	leaq	48(%rsp), %rax
	movq	%rax, 808(%rsp)
	leaq	40(%rsp), %rax
	movq	%rax, 816(%rsp)
	leaq	32(%rsp), %rax
	movq	%rax, 824(%rsp)
	leaq	24(%rsp), %rax
	movq	%rax, 832(%rsp)
	leaq	16(%rsp), %rax
	movq	%rax, 840(%rsp)
	leaq	1264(%rsp), %rax
	movq	%rax, 848(%rsp)
	leaq	1272(%rsp), %rax
	movq	%rax, 856(%rsp)
	leaq	1280(%rsp), %rax
	movq	%rax, 864(%rsp)
	leaq	1288(%rsp), %rax
	movq	%rax, 872(%rsp)
	leaq	1296(%rsp), %rax
	movq	%rax, 880(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 888(%rsp)
	movq	%rsp, %rax
	movq	%rax, 896(%rsp)
	leaq	1304(%rsp), %rax
	movq	%rax, 904(%rsp)
	leaq	1312(%rsp), %rax
	movq	%rax, 912(%rsp)
	leaq	1320(%rsp), %rax
	movq	%rax, 920(%rsp)
	leaq	1328(%rsp), %rax
	movq	%rax, 928(%rsp)
# Default launch configuration (grid/block dims all 1) before popping the real one
	movl	$1, 416(%rsp)
	movl	$1, 420(%rsp)
	movl	$1, 424(%rsp)
	movl	$1, 428(%rsp)
	movl	$1, 432(%rsp)
	movl	$1, 436(%rsp)
	leaq	408(%rsp), %rcx
	leaq	400(%rsp), %rdx
	leaq	428(%rsp), %rsi
	leaq	416(%rsp), %rdi
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L49
.L45:
# Verify the stack canary, then return
	movq	936(%rsp), %rax
	subq	%fs:40, %rax
	jne	.L50
	addq	$952, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L49:
	.cfi_restore_state
# Launch path: push shared-mem size and stream, pass grid/block dims and args
	pushq	408(%rsp)
	.cfi_def_cfa_offset 968
	pushq	408(%rsp)
	.cfi_def_cfa_offset 976
	leaq	480(%rsp), %r9
	movq	444(%rsp), %rcx
	movl	452(%rsp), %r8d
	movq	432(%rsp), %rsi
	movl	440(%rsp), %edx
	leaq	_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj(%rip), %rdi
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 960
	jmp	.L45
.L50:
	call	__stack_chk_fail@PLT
	.cfi_endproc
.LFE2072:
	.size	_Z138__device_stub__Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjjPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj, .-_Z138__device_stub__Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjjPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
# ---------------------------------------------------------------------------
# Host-callable entry for the RicianMLE kernel symbol: re-pushes all register
# and stack arguments in the device-stub's expected layout (9 spilled register
# values, then 38 stack-passed arguments) and tail-calls into the
# __device_stub__ above, which performs the actual cudaLaunchKernel.
# ---------------------------------------------------------------------------
	.globl	_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
	.type	_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj, @function
_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj:
.LFB2073:
	.cfi_startproc
	endbr64
	subq	$16, %rsp
	.cfi_def_cfa_offset 24
	movl	392(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 32
	movl	392(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 40
	movl	392(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 48
	movl	392(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 56
	movl	392(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 64
	movl	392(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 72
	movl	392(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 80
	movl	392(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 88
	movl	392(%rsp), %eax
	pushq	%rax
	.cfi_def_cfa_offset 96
	pushq	392(%rsp)
	.cfi_def_cfa_offset 104
	pushq	392(%rsp)
	.cfi_def_cfa_offset 112
	pushq	392(%rsp)
	.cfi_def_cfa_offset 120
	pushq	392(%rsp)
	.cfi_def_cfa_offset 128
	pushq	392(%rsp)
	.cfi_def_cfa_offset 136
	pushq	392(%rsp)
	.cfi_def_cfa_offset 144
	pushq	392(%rsp)
	.cfi_def_cfa_offset 152
	pushq	392(%rsp)
	.cfi_def_cfa_offset 160
	pushq	392(%rsp)
	.cfi_def_cfa_offset 168
	pushq	392(%rsp)
	.cfi_def_cfa_offset 176
	pushq	392(%rsp)
	.cfi_def_cfa_offset 184
	pushq	392(%rsp)
	.cfi_def_cfa_offset 192
	pushq	392(%rsp)
	.cfi_def_cfa_offset 200
	pushq	392(%rsp)
	.cfi_def_cfa_offset 208
	pushq	392(%rsp)
	.cfi_def_cfa_offset 216
	pushq	392(%rsp)
	.cfi_def_cfa_offset 224
	pushq	392(%rsp)
	.cfi_def_cfa_offset 232
	pushq	392(%rsp)
	.cfi_def_cfa_offset 240
	pushq	392(%rsp)
	.cfi_def_cfa_offset 248
	pushq	392(%rsp)
	.cfi_def_cfa_offset 256
	pushq	392(%rsp)
	.cfi_def_cfa_offset 264
	pushq	392(%rsp)
	.cfi_def_cfa_offset 272
	pushq	392(%rsp)
	.cfi_def_cfa_offset 280
	pushq	392(%rsp)
	.cfi_def_cfa_offset 288
	pushq	392(%rsp)
	.cfi_def_cfa_offset 296
	pushq	392(%rsp)
	.cfi_def_cfa_offset 304
	pushq	392(%rsp)
	.cfi_def_cfa_offset 312
	pushq	392(%rsp)
	.cfi_def_cfa_offset 320
	pushq	392(%rsp)
	.cfi_def_cfa_offset 328
	pushq	392(%rsp)
	.cfi_def_cfa_offset 336
	pushq	392(%rsp)
	.cfi_def_cfa_offset 344
	pushq	392(%rsp)
	.cfi_def_cfa_offset 352
	pushq	392(%rsp)
	.cfi_def_cfa_offset 360
	pushq	392(%rsp)
	.cfi_def_cfa_offset 368
	pushq	392(%rsp)
	.cfi_def_cfa_offset 376
	pushq	392(%rsp)
	.cfi_def_cfa_offset 384
	pushq	392(%rsp)
	.cfi_def_cfa_offset 392
	pushq	392(%rsp)
	.cfi_def_cfa_offset 400
	call	_Z138__device_stub__Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjjPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
	addq	$392, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2073:
	.size	_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj, .-_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj"
.text
# Static initializer emitted by nvcc (run via .init_array before main):
# registers the embedded fat binary and the RicianMLE kernel with the CUDA
# runtime, then schedules _ZL26__cudaUnregisterBinaryUtilv via atexit.
	.type	_ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2075:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	leaq	_ZL15__fatDeviceText(%rip), %rdi
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rdi
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)
	pushq	$0
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d
	leaq	.LC0(%rip), %rdx
	movq	%rdx, %rcx
	leaq	_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj(%rip), %rsi
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2075:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* Rician MLE diffusion and kurtosis tensor estimator by Viljami Sairanen (2016)
Based on algorithm in:
"Liu, Jia, Dario Gasbarra, and Juha Railavo.
"Fast Estimation of Diffusion Tensors under
Rician noise by the EM algorithm."
Journal of neuroscience methods 257 (2016) : 147 - 158" */
// to convert between single and double precision use following changes:
// double <-> double
// sqrt( <-> sqrt(
// fabs( <-> fabs(
// exp( <-> exp(
// log( <-> log(
#include <math.h>
__device__ size_t calculateGlobalIndex() {
    // Flatten the 2-D grid and 2-D block coordinates into one linear
    // thread index; each thread processes one voxel.
    size_t const blockId = blockIdx.x + blockIdx.y * gridDim.x;          // linear block index
    size_t const threadInBlock = threadIdx.x + blockDim.x * threadIdx.y; // linear index within block
    size_t const blockSize = blockDim.x * blockDim.y;                    // threads per block
    return threadInBlock + blockId * blockSize;
}
__device__ double getBesseli0(double x) {
    // Exponentially scaled modified Bessel function of order 0:
    // returns exp(-|x|) * I0(x) (matches MATLAB besseli(0,x,1)).
    // Polynomial / asymptotic fits as in Numerical Recipes "bessi0".
    double const ax = fabs(x);
    double result;
    if (ax < 3.75) {
        // Small-argument polynomial fit, scaled explicitly by exp(-ax).
        double t = x / 3.75;
        t *= t;
        result = 1.0 + t*(3.5156229 + t*(3.0899424 + t*(1.2067492 +
            t*(0.2659732 + t*(0.360768e-1 + t*0.45813e-2)))));
        result *= exp(-ax); // scale by exp(-abs(real(x))); see matlab help for besseli
    } else {
        // Large-argument asymptotic expansion; exp scaling is implicit here.
        double const t = 3.75 / ax;
        result = (1.0 / sqrt(ax)) *
            (0.39894228 + t * (0.1328592e-1
            + t * (0.225319e-2 + t * (-0.157565e-2 + t * (0.916281e-2
            + t * (-0.2057706e-1 + t * (0.2635537e-1 + t * (-0.1647633e-1
            + t * (0.392377e-2)))))))));
    }
    return result;
}
// Exponentially scaled modified Bessel function of order 1:
// returns exp(-|x|) * I1(x) (matches MATLAB besseli(1,x,1)).
// Polynomial / asymptotic fits as in Numerical Recipes "bessi1".
__device__ double getBesseli1(double x) {
double ax, ans, y;
ax = fabs(x);
if (ax < 3.75) {
// Small-argument polynomial fit, scaled explicitly by exp(-ax).
y = x / 3.75;
y *= y;
ans = ax * (0.5 + y *(0.87890594 + y *(0.51498869 + y *(0.15084934
+ y * (0.2658733e-1 + y * (0.301532e-2 + y * 0.32411e-3))))));
ans *= exp(-ax); // scale by exp(-abs(real(x))); see matlab help for besseli
}
else {
// Large-argument asymptotic expansion; exp scaling is implicit in this form.
y = 3.75 / ax;
ans = 0.2282967e-1 + y * (-0.2895312e-1 + y * (0.1787654e-1
- y * 0.420059e-2));
ans = 0.39894228 + y * (-0.3988024e-1 + y * (-0.362018e-2
+ y * (0.163801e-2 + y * (-0.1031555e-1 + y * ans))));
ans *= 1.0 / sqrt(ax); // scale by exp(-abs(real(x))); see matlab help for besseli
}
return x < 0.0 ? -ans : ans; // I1 is an odd function of x
}
__device__ double getMax(
double *arr,
const unsigned int length,
size_t const THREAD) {
    // Maximum of this thread's contiguous segment
    // arr[THREAD*length .. THREAD*length + length - 1].
    double *seg = arr + THREAD * length;
    double best = seg[0];
    for (int k = 1; k < length; k++) {
        if (seg[k] > best) {
            best = seg[k];
        }
    }
    return best;
}
/* Crout LU decomposition with implicit scaled partial pivoting (after
 * Numerical Recipes "ludcmp"), applied to this thread's n x n matrix
 * stored row-major at a[THREAD*n*n ...].
 * On exit: a holds the combined L/U factors, indx[THREAD*n ...] records
 * the row permutation, vv[THREAD*n ...] is scratch for per-row scaling.
 * A zero row / zero pivot is replaced by 1.0e-20 instead of failing. */
__device__ void LUdecomposition(double *a, int n, int *indx, double *vv, size_t const THREAD) {
    int i, imax = 0, j, k; // imax initialized defensively (was potentially read unset)
    double big, dum, sum, temp;
    // Pass 1: store the implicit scaling of each row (1 / largest |element|).
    for (i = 0; i < n; i++) {
        big = 0.0;
        for (j = 0; j < n; j++) {
            temp = fabs(a[THREAD * n * n + i * n + j]);
            if (temp >= big) {
                big = temp;
            }
        }
        if (big == 0.0) { // Singular matrix can't compute
            big = 1.0e-20;
        }
        vv[THREAD * n + i] = 1.0 / big;
    }
    // Pass 2: Crout's method, column by column.
    for (j = 0; j < n; j++) {
        for (i = 0; i < j; i++) {
            sum = a[THREAD * n * n + i * n + j];
            for (k = 0; k < i; k++) {
                sum -= a[THREAD * n * n + i * n + k] * a[THREAD * n * n + k * n + j];
            }
            a[THREAD * n * n + i * n + j] = sum;
        }
        // Search for the largest scaled pivot in column j.
        big = 0.0;
        for (i = j; i < n; i++) {
            sum = a[THREAD * n * n + i * n + j];
            for (k = 0; k < j; k++) {
                sum -= a[THREAD * n * n + i * n + k] * a[THREAD * n * n + k * n + j];
            }
            a[THREAD * n * n + i * n + j] = sum;
            dum = vv[THREAD * n + i] * fabs(sum);
            if (dum >= big) {
                big = dum;
                imax = i;
            }
        }
        if (j != imax) { // Interchange rows j and imax.
            for (k = 0; k < n; k++) {
                dum = a[THREAD * n * n + imax * n + k];
                // BUGFIX: this store previously went to a[imax*n + k] WITHOUT the
                // THREAD*n*n offset, so every thread with THREAD > 0 corrupted
                // thread 0's matrix instead of swapping its own rows.
                a[THREAD * n * n + imax * n + k] = a[THREAD * n * n + j * n + k];
                a[THREAD * n * n + j * n + k] = dum;
            }
            vv[THREAD * n + imax] = vv[THREAD * n + j];
        }
        indx[THREAD * n + j] = imax;
        if (a[THREAD * n * n + j * n + j] == 0.0) {
            a[THREAD * n * n + j * n + j] = 1.0e-20; // avoid division by a zero pivot
        }
        if (j != n) { // always true for j < n; last column's divide loop is empty anyway
            dum = 1.0 / a[THREAD * n * n + j * n + j];
            for (i = j + 1; i < n; i++) {
                a[THREAD * n * n + i * n + j] *= dum;
            }
        }
    }
}
// Solves A x = b using the LU factors and row permutation produced by
// LUdecomposition (Numerical Recipes "lubksb"). The solution overwrites
// b[THREAD*n .. THREAD*n + n - 1]. `ii` tracks the first non-zero element
// of b so leading zeros are skipped in the forward substitution.
__device__ void LUsubstitutions(double *a, int n, int *indx, double *b, size_t const THREAD) {
int i, ii = 0, ip, j;
double sum;
// Forward substitution (unscrambling the permutation as we go).
for (i = 0; i<n; i++) {
ip = indx[(THREAD * n) + i];
sum = b[(THREAD * n) + ip];
b[(THREAD * n) + ip] = b[(THREAD * n) + i];
if (ii != 0) {
for (j = ii - 1; j<i; j++) {
sum -= a[(THREAD * n * n) + (i * n) + j] * b[(THREAD * n) + j];
}
}
else if (sum != 0) {
// First non-vanishing element of b found; start summing from here on.
ii = i + 1;
}
b[(THREAD * n) + i] = sum;
}
// Back substitution.
for (i = n - 1; i >= 0; i--) {
sum = b[(THREAD * n) + i];
for (j = i + 1; j<n; j++) {
sum -= a[(THREAD * n * n) + (i * n) + j] * b[(THREAD * n) + j];
}
b[(THREAD * n) + i] = sum / a[(THREAD * n * n) + (i * n) + i];
}
}
// Cholesky decomposition A = L L^T of this thread's n x n matrix (after
// Numerical Recipes "choldc"). The factor L is written into the lower
// triangle of `a`; its diagonal goes into p[THREAD*n ...]. The upper
// triangle of `a` is left untouched. A non-positive pivot (matrix not
// positive definite) is clamped to 1.0e-20 instead of aborting.
__device__ void CholeskyDecomposition(double *a, int n, double *p, size_t const THREAD) {
int i, j, k;
double sum;
for (i = 0; i < n; i++) {
for (j = i; j < n; j++) {
sum = a[(THREAD * n * n) + (i*n) + j];
for (k = i-1; k >= 0; k--) {
sum -= a[(THREAD * n * n) + (i*n) + k]
* a[(THREAD * n * n) + (j*n) + k];
}
if (i == j) {
if (sum <= 0.0) {
sum = 1.0e-20; // Cholesky decomposition failed
}
p[THREAD*n + i] = sqrt(sum);
}
else {
a[(THREAD*n*n) + (j*n) + i] = sum / p[THREAD*n + i];
}
}
}
}
/* Solves A x = b given the Cholesky factorization from CholeskyDecomposition
 * (L in the lower triangle of `a`, diagonal of L in `p`): first forward-
 * substitutes L y = b (y stored in x), then back-substitutes L^T x = y.
 * All vectors are per-thread slices of length n; the matrix is n x n. */
__device__ void CholeskyBacksubstitution(double *a, int n, double *p, double *b, double *x, size_t const THREAD) {
    int i, k;
    double sum;
    for (i = 0; i < n; i++) { // Solve Ly=b, storing y in x
        sum = b[THREAD*n + i];
        for (k = i-1; k >= 0; k--) {
            sum -= a[(THREAD*n*n) + (i*n) + k] * x[THREAD*n + k];
        }
        x[THREAD*n + i] = sum / p[THREAD*n + i];
    }
    // BUGFIX: this loop started at i = n, which read p[THREAD*n + n] and
    // wrote x[THREAD*n + n] -- one element past this thread's slice, i.e.
    // into the NEXT thread's x[0] (out-of-bounds for the last thread).
    for (i = n - 1; i >= 0; i--) { // Solve L^(T)x=y
        sum = x[THREAD*n + i];
        for (k = i+1; k < n; k++) {
            sum -= a[(THREAD*n*n) + (k*n) + i] * x[THREAD*n + k];
        }
        x[THREAD*n + i] = sum / p[THREAD*n + i];
    }
}
__device__ void calculateExpZTheta(
double *expZTheta,
double *theta,
double *Z,
const unsigned int nParams,
const unsigned int nDWIs,
size_t const THREAD) {
    // Predicted signal for each DWI: expZTheta[i] = exp( sum_j Z[j][i] * theta[j] ),
    // using this thread's parameter vector theta[THREAD*nParams ...].
    double *out = expZTheta + THREAD * nDWIs;
    double *th = theta + THREAD * nParams;
    for (int i = 0; i < nDWIs; i++) {
        double acc = 0.0;
        for (int j = 0; j < nParams; j++) {
            acc += Z[j * nDWIs + i] * th[j];
        }
        out[i] = exp(acc);
    }
}
__device__ void calculateAB_1(
double *a,
double *b,
double *Y,
double *expZTheta,
double *sumYSQ,
const unsigned int nDWIs,
size_t const THREAD) {
    // a = sum(Y^2) + sum(mu_i^2) and b_i = Y_i * mu_i, where mu = expZTheta
    // is the predicted signal for this thread's voxel.
    double acc = sumYSQ[THREAD];
    for (int i = 0; i < nDWIs; i++) {
        double const mu = expZTheta[THREAD * nDWIs + i];
        acc += mu * mu;
        b[THREAD * nDWIs + i] = Y[THREAD * nDWIs + i] * mu;
    }
    a[THREAD] = acc;
}
// Variant of calculateAB used while optimizing theta(1) = log(S0):
// the exponent excludes theta(1) (j starts from 1), so expZTheta here is
// the predicted signal WITHOUT the S0 factor. Also computes
// twotau_i = b_i * exp(theta(1)) / SigmaSQ and a = log(sum(expZTheta^2)).
// Overwrites a, b, expZTheta and twotau for this thread.
__device__ void calculateAB_2(
double *a,
double *b,
double *Y,
double *Z,
double *theta,
double *SigmaSQ,
double *expZTheta,
double *twotau,
const unsigned int nDWIs,
const unsigned int nParams,
size_t const THREAD) {
// Now indexing for i ranges [0, nDWIs-1] and j ranges [1, nParams] since first nParams is the theta(1)
a[THREAD] = 0.0;
for (int i = 0; i < nDWIs; i++) {
expZTheta[THREAD * nDWIs + i] = 0.0;
for (int j = 1; j < nParams; j++) {
expZTheta[THREAD * nDWIs + i] +=
Z[j * nDWIs + i] * theta[THREAD * nParams + j];
}
expZTheta[THREAD * nDWIs + i] = exp(expZTheta[THREAD * nDWIs + i]);
a[THREAD] += expZTheta[THREAD * nDWIs + i] * expZTheta[THREAD * nDWIs + i];
b[THREAD * nDWIs + i] = Y[THREAD * nDWIs + i] * expZTheta[THREAD * nDWIs + i];
twotau[THREAD * nDWIs + i] = b[THREAD * nDWIs + i] * exp(theta[THREAD * nParams+0]) / SigmaSQ[THREAD];
}
// Note: a is returned in log form, as consumed by iterateS0.
a[THREAD] = log(a[THREAD]);
}
__device__ void calculateEN(
double *EN,
double *twotau,
const unsigned int nDWIs,
bool *anyEN,
size_t const THREAD) {
    // Conditional expectation EN_i = 0.5 * t_i * I1(t_i)/I0(t_i) with
    // t = twotau (the exp(-|t|) scaling of the two Bessel terms cancels).
    // Sets anyEN[THREAD] if at least one EN_i is positive, i.e. there is
    // information to estimate the tensor(s).
    bool found = false;
    for (int i = 0; i < nDWIs; i++) {
        double const t = twotau[THREAD * nDWIs + i];
        double const en = 0.5 * t * getBesseli1(t) / getBesseli0(t);
        EN[THREAD * nDWIs + i] = en;
        if (en > 0.0) {
            found = true;
        }
    }
    anyEN[THREAD] = found;
}
// Linear predictor for the loglikelihood:
// ZTheta_i = 2 * sum_{j>=1} Z[j][i]*theta[j] + c, where
// c = 2*theta(1) - log(2*SigmaSQ) collects the terms that stay constant
// while theta(2:end) is optimized. Overwrites c and ZTheta for this thread.
__device__ void calculateZTheta(
double *c,
double *ZTheta,
double *theta,
double *SigmaSQ,
double *Z,
const unsigned int nDWIs,
const unsigned int nParams,
size_t const THREAD) {
// Now indexing for i ranges [0, nDWIs-1] and j ranges [1, nParams] since first nParams is the theta(1)
c[THREAD] = 2.0 * theta[THREAD * nParams+0] -
log(2.0 * SigmaSQ[THREAD]);
for (int i = 0; i < nDWIs; i++) {
ZTheta[THREAD * nDWIs + i] = 0.0;
for (int j = 1; j < nParams; j++) {
ZTheta[THREAD * nDWIs + i] +=
Z[j * nDWIs + i] * theta[THREAD * nParams + j];
}
ZTheta[THREAD * nDWIs + i] *= 2.0;
ZTheta[THREAD * nDWIs + i] += c[THREAD];
}
}
__device__ void calculateLoglikelihood(
double *loglikelihood,
double *expo,
double *ZTheta,
double *scaling,
double *expScaling,
double *EN,
const unsigned int nDWIs,
size_t const THREAD) {
    // loglik = sum_i [ EN_i * ZTheta_i - exp(ZTheta_i) ], where exp(ZTheta_i)
    // is evaluated as exp(ZTheta_i - s) * exp(s) with s = scaling (the max of
    // ZTheta) to avoid overflow. Also stores the scaled exponentials in expo.
    double acc = 0.0;
    for (int i = 0; i < nDWIs; i++) {
        double const zt = ZTheta[THREAD * nDWIs + i];
        double const e = exp(zt - scaling[THREAD]);
        expo[THREAD * nDWIs + i] = e;
        acc += EN[THREAD * nDWIs + i] * zt - e * expScaling[THREAD];
    }
    loglikelihood[THREAD] = acc;
}
__device__ void initializeInformationMatrices(
double *fisherInformation,
double *fisherInformation_sym,
const unsigned int nDeltaParams,
size_t const THREAD) {
    // Zero this thread's nDeltaParams x nDeltaParams Fisher information
    // matrix and its symmetrized copy.
    const unsigned int sz = nDeltaParams * nDeltaParams;
    double *fi = fisherInformation + THREAD * sz;
    double *fis = fisherInformation_sym + THREAD * sz;
    for (int i = 0; i < sz; i++) {
        fi[i] = 0.0;
        fis[i] = 0.0;
    }
}
// Fixed-point EM iteration for the Rician noise variance SigmaSQ with
// theta held fixed: SigmaSQ <- 0.5*a / (nDWIs + sum_i t_i*I1(t_i)/I0(t_i)),
// where t_i = b_i / SigmaSQ. Stops when the absolute change drops to
// toleranceSigmaSQ or iterLimitSigmaSQ is reached.
// Updates SigmaSQ, SigmaSQ0, tmpdouble, twotau and nIterSigmaSQ.
__device__ void iterateSigmaSQ(
double *SigmaSQ,
double *SigmaSQ0,
double *tmpdouble,
double *a,
double *b,
double *twotau,
unsigned int *nIterSigmaSQ,
unsigned int iterLimitSigmaSQ,
const double toleranceSigmaSQ,
const unsigned int nDWIs,
bool *continueSigmaSQIteration,
size_t const THREAD) {
// Should be ok
continueSigmaSQIteration[THREAD] = true;
nIterSigmaSQ[THREAD] = 0;
while (continueSigmaSQIteration[THREAD]) {
(nIterSigmaSQ[THREAD])++;
SigmaSQ0[THREAD] = SigmaSQ[THREAD];
tmpdouble[THREAD] = 0.0;
for (int i = 0; i < nDWIs; i++) {
twotau[THREAD * nDWIs + i] = b[THREAD * nDWIs + i] / SigmaSQ[THREAD];
tmpdouble[THREAD] += twotau[THREAD * nDWIs + i] *
getBesseli1(twotau[THREAD * nDWIs + i]) /
getBesseli0(twotau[THREAD * nDWIs + i]);
}
SigmaSQ[THREAD] = 0.5 * a[THREAD] / ((double)(nDWIs) + tmpdouble[THREAD]);
continueSigmaSQIteration[THREAD] =
((nIterSigmaSQ[THREAD] < iterLimitSigmaSQ)
&&
(fabs(SigmaSQ[THREAD] - SigmaSQ0[THREAD]) > toleranceSigmaSQ));
}
}
/* EM update of theta(1) = log(S0) with theta(2:end) and SigmaSQ held fixed:
 * theta(1) <- log( sum_i b_i * I1(twotau_i)/I0(twotau_i) ) - a,
 * refreshing twotau after each update. Stops when the relative change of
 * theta(1) drops to toleranceS0 or iterLimitS0 is reached.
 * Updates theta[THREAD*nParams+0], theta1_old, twotau and nIterS0.
 * Inputs a, b, twotau come from calculateAB_2 (a is already in log form). */
__device__ void iterateS0(
double *theta,
double *theta1_old,
double *SigmaSQ,
double *a,
double *b,
double *twotau,
unsigned int *nIterS0,
unsigned int iterLimitS0,
const double toleranceS0,
const unsigned int nDWIs,
const unsigned int nParams,
bool *continueS0Iteration,
size_t const THREAD) {
    continueS0Iteration[THREAD] = true;
    nIterS0[THREAD] = 0;
    while (continueS0Iteration[THREAD]) {
        nIterS0[THREAD]++;
        // Get initial theta(1) parameter
        theta1_old[THREAD] = theta[THREAD * nParams + 0];
        // Calculate new theta(1) parameter
        theta[THREAD * nParams + 0] = 0.0;
        for (int i = 0; i < nDWIs; i++) {
            theta[THREAD * nParams + 0] += (b[THREAD * nDWIs + i] *
                getBesseli1(twotau[THREAD * nDWIs + i]) /
                getBesseli0(twotau[THREAD * nDWIs + i]));
        }
        theta[THREAD * nParams + 0] = log(theta[THREAD * nParams + 0]) - a[THREAD];
        // Update twotau for the next iteration step
        for (int i = 0; i < nDWIs; i++) {
            twotau[THREAD * nDWIs + i] = b[THREAD * nDWIs + i] *
                exp(theta[THREAD * nParams + 0]) / SigmaSQ[THREAD];
        }
        // Test to end while loop.
        // BUGFIX: the relative change was never compared against toleranceS0
        // (the condition was just fabs(...), i.e. "any non-zero change"), so
        // the loop effectively always ran until iterLimitS0.
        continueS0Iteration[THREAD] =
            ((nIterS0[THREAD] < iterLimitS0)
             &&
             (fabs((theta[THREAD * nParams + 0] - theta1_old[THREAD]) / theta1_old[THREAD]) > toleranceS0));
    }
}
// Accumulates the score vector and the (regularizable) Fisher information
// matrix over the nDeltaParams = nParams-1 tensor parameters:
//   score[j-1]  = 2 * sum_i Z[j][i] * (EN_i - exp(ZTheta_i))
//   fisher[j-1][k-1] += 4 * Z[j][i] * Z[k][i] * expo_i   (times expScaling)
// Also seeds DeltaTheta with the score. fisherInformation must be zeroed
// beforehand (initializeInformationMatrices).
// NOTE(review): the "symmetrize" step averages the element with ITSELF
// (both terms index (j-1, k-1)); presumably the second term was meant to be
// the transpose (k-1, j-1). The accumulation is symmetric in j,k, so the
// final matrix comes out the same -- confirm before "fixing".
__device__ void calculateFisherInformation(
double *fisherInformation,
double *fisherInformation_sym,
double *Z,
double *score,
double *DeltaTheta,
double *expo,
double *EN,
double *expScaling,
const unsigned int nDWIs,
const unsigned int nParams,
const unsigned int nDeltaParams,
size_t const THREAD) {
for (int j = 1; j < nParams; j++) {
score[THREAD * nDeltaParams + j - 1] = 0.0;
for (int i = 0; i < nDWIs; i++) {
score[THREAD * nDeltaParams + j - 1] +=
2.0 * Z[j * nDWIs + i] * (EN[THREAD * nDWIs + i] -
expo[THREAD * nDWIs + i] * expScaling[THREAD]);
for (int k = 1; k < nParams; k++) { // range of j and k are [1 to nParams]
fisherInformation[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)] +=
4.0 * Z[j * nDWIs + i] * Z[k * nDWIs + i] * expo[THREAD * nDWIs + i];
// Symmetrize Fisher Information
fisherInformation_sym[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)] =
(fisherInformation[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)] +
fisherInformation[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)]) *
0.5 * expScaling[THREAD];
}
}
DeltaTheta[THREAD * nDeltaParams + j - 1] = score[THREAD *nDeltaParams + j - 1];
}
// Make copy of symmetric Fisher information matrix
for (int i = 0; i < nDeltaParams*nDeltaParams; i++) {
fisherInformation[THREAD * nDeltaParams * nDeltaParams + i] = fisherInformation_sym[THREAD * nDeltaParams * nDeltaParams + i];
}
}
// Levenberg-Marquardt-style inner loop: repeatedly solves
// (Fisher + lambda*I) * DeltaTheta = score via Cholesky, forms the
// candidate new_theta = theta(2:end) + DeltaTheta, re-evaluates the
// loglikelihood, and increases the regulator lambda until the candidate
// no longer decreases the loglikelihood (or iterLimitLoglikelihood hits).
// Updates DeltaTheta, new_theta, fisherInformation (diagonal is
// overwritten with the regularized values), ZTheta, scaling, expScaling,
// expo, loglikelihood, regulatorLambda, nIterLoglikelihood.
__device__ void iterateLoglikelihood(
int *indx,
double *score,
double *vv,
double *DeltaTheta,
double *Z,
double *expo,
double *theta,
double *loglikelihood,
double *loglikelihood_old,
double *new_theta,
double *regulatorLambda,
double *fisherInformation,
double *fisherInformation_sym,
double *ZTheta,
double *c,
double *scaling,
double *expScaling,
double *EN,
const unsigned int nDWIs,
const unsigned int nParams,
const unsigned int nDeltaParams,
const double regulatorLambda0,
const double regulatorRescaling,
unsigned int *nIterLoglikelihood,
const unsigned int iterLimitLoglikelihood,
const double toleranceLoglikelihood,
bool *continueLoglikelihoodIteration,
size_t const THREAD) {
nIterLoglikelihood[THREAD] = 0;
continueLoglikelihoodIteration[THREAD] = true;
regulatorLambda[THREAD] = regulatorLambda0;
while (continueLoglikelihoodIteration[THREAD]) {
nIterLoglikelihood[THREAD]++;
//loglikelihood_old[THREAD] = loglikelihood[THREAD]; // loglikelihood_old is not supposed to be updated in this loop
// Initialize DeltaTheta for LUdecomposition & substitutions
// because X = I\score calculated using LUsubstitutions actually
// replaces values in score and we don't want to loose that information
// so we have to save score into DeltaTheta variable
for (int j = 1; j < nParams; j++) {
DeltaTheta[THREAD * nDeltaParams + j - 1] = score[THREAD *nDeltaParams + j - 1];
}
// Regularize Fisher information matrix with lambda
for (int i = 0; i < nDeltaParams; i++) {
fisherInformation[THREAD * nDeltaParams*nDeltaParams + i*nDeltaParams + i] =
fisherInformation_sym[THREAD * nDeltaParams*nDeltaParams + i*nDeltaParams + i]
+ regulatorLambda[THREAD];
}
// Update regulatorLambda
regulatorLambda[THREAD] *= regulatorRescaling;
//LUdecomposition(fisherInformation, nDeltaParams, indx, vv, THREAD);
//LUsubstitutions(fisherInformation, nDeltaParams, indx, DeltaTheta, THREAD);
// Solve the regularized normal equations; vv receives the Cholesky diagonal.
CholeskyDecomposition(fisherInformation, nDeltaParams, vv, THREAD);
CholeskyBacksubstitution(fisherInformation, nDeltaParams, vv, score, DeltaTheta, THREAD);
//goto THE_END_LOGLIKELIHOOD;
// Calculate new theta(2:end)
for (int i = 1; i < nParams; i++) {
new_theta[THREAD * nDeltaParams + i - 1] =
theta[THREAD * nParams + i]
+ DeltaTheta[THREAD * nDeltaParams + i - 1];
}
// Calculate ZTheta based on new_theta
for (int i = 0; i < nDWIs; i++) {
ZTheta[THREAD * nDWIs + i] = 0.0;
for (int j = 1; j < nParams; j++) {
ZTheta[THREAD * nDWIs + i] +=
Z[j* nDWIs + i] * new_theta[THREAD * nDeltaParams + j - 1];
}
ZTheta[THREAD * nDWIs + i] *= 2.0;
ZTheta[THREAD * nDWIs + i] += c[THREAD]; // c is based on theta(1) and sigmasq that are constant in this loop
}
scaling[THREAD] = getMax(ZTheta, nDWIs, THREAD);
expScaling[THREAD] = exp(scaling[THREAD]);
// Calculate new loglikelihood
// calculateLoglikelihood updates loglikelihood and expo variables
calculateLoglikelihood(loglikelihood, expo, ZTheta, scaling, expScaling, EN, nDWIs, THREAD);
// Check if new loglikelihood is NaN, if so more regulation is needed
// (f != f) is true only if f is NaN (IEEE standard)
if (loglikelihood[THREAD] != loglikelihood[THREAD]) {
// loglikelihood is NaN, check only iterations
continueLoglikelihoodIteration[THREAD] = (nIterLoglikelihood[THREAD] < iterLimitLoglikelihood);
}
else {
continueLoglikelihoodIteration[THREAD] =
((loglikelihood[THREAD] < loglikelihood_old[THREAD])
&&
(nIterLoglikelihood[THREAD] < iterLimitLoglikelihood));
}
}
//THE_END_LOGLIKELIHOOD:
}
// Outer Newton/EM loop over the tensor parameters theta(2:end):
// builds the score and Fisher information, runs the regularized inner
// loglikelihood loop, and accepts the candidate step when the resulting
// loglikelihood is finite. Stops on the DeltaTheta*score and
// loglikelihood-change criteria or at iterLimitTheta.
// NOTE(review): loglikelihood_old is updated BEFORE the continuation test,
// so (loglikelihood - loglikelihood_old) is always 0 there and that branch
// of the test never fires; presumably intentional per the dead-code
// comments, but confirm against the reference implementation.
__device__ void iterateTheta(
int *indx,
double *vv,
double *theta,
double *ZTheta,
double *c,
double *fisherInformation,
double *fisherInformation_sym,
double *score,
double *Z,
double *EN,
double *scaling,
double *expScaling,
double *expo,
double *DeltaTheta,
double *DeltaThetaScore,
double *new_theta,
double *loglikelihood,
double *loglikelihood_old,
double *regulatorLambda,
const double regulatorLambda0,
const double regulatorRescaling,
const unsigned int nDWIs,
const unsigned int nParams,
const unsigned int nDeltaParams,
unsigned int *nIterTheta,
unsigned int *nIterLoglikelihood,
const unsigned int iterLimitTheta,
const unsigned int iterLimitLoglikelihood,
const double toleranceTheta,
const double toleranceLoglikelihood,
bool *continueThetaIteration,
bool *continueLoglikelihoodIteration,
size_t const THREAD) {
// Now indexing for i ranges [0, nDWIs-1] and j ranges [1, nParams] since first nParams is the theta(1)
continueThetaIteration[THREAD] = true;
nIterTheta[THREAD] = 0;
loglikelihood_old[THREAD] = loglikelihood[THREAD];
while (continueThetaIteration[THREAD]) {
nIterTheta[THREAD]++;
calculateFisherInformation(fisherInformation, fisherInformation_sym, Z, score, DeltaTheta, expo, EN, expScaling, nDWIs, nParams, nDeltaParams, THREAD);
// Optimize loglikelihood
iterateLoglikelihood(indx, score, vv, DeltaTheta, Z, expo, theta, loglikelihood, loglikelihood_old, new_theta, regulatorLambda, fisherInformation, fisherInformation_sym, ZTheta, c, scaling, expScaling, EN, nDWIs, nParams, nDeltaParams, regulatorLambda0, regulatorRescaling, nIterLoglikelihood, iterLimitLoglikelihood, toleranceLoglikelihood, continueLoglikelihoodIteration, THREAD);
//goto THE_END_THETA;
// DeltaTheta . score, used as a convergence measure for the Newton step.
DeltaThetaScore[THREAD] = 0.0;
for (int i = 0; i < nDeltaParams; i++) {
DeltaThetaScore[THREAD] += DeltaTheta[THREAD * nDeltaParams + i]
* score[THREAD * nDeltaParams + i];
}
// Check if new loglikelihood is NaN, if not
// update theta(2:end) and loglikelihood_old
if (loglikelihood[THREAD] != loglikelihood[THREAD]) {
// NaN, don't update variables
continueThetaIteration[THREAD] = (nIterTheta[THREAD] < iterLimitTheta);
} else {
for (int i = 1; i < nParams; i++) {
theta[THREAD * nParams + i] = new_theta[THREAD * nDeltaParams + i - 1];
}
loglikelihood_old[THREAD] = loglikelihood[THREAD];
continueThetaIteration[THREAD] =
(((DeltaThetaScore[THREAD] > toleranceTheta)
||
((loglikelihood[THREAD] - loglikelihood_old[THREAD]) > toleranceLoglikelihood))
&&
(nIterTheta[THREAD] < iterLimitTheta));
}
}
//THE_END_THETA:
}
__device__ void calculateNorms(
double *norm1,
double *norm2,
double *theta,
double *theta_old,
const unsigned int nParams,
size_t const THREAD) {
    // Euclidean norms used for the voxel-level convergence test:
    // norm1 = ||theta_old||, norm2 = ||theta - theta_old||.
    double sumOld = 0.0;
    double sumDiff = 0.0;
    for (int i = 0; i < nParams; i++) {
        double const prev = theta_old[THREAD * nParams + i];
        double const diff = theta[THREAD * nParams + i] - prev;
        sumOld += prev * prev;
        sumDiff += diff * diff;
    }
    norm1[THREAD] = sqrt(sumOld);
    norm2[THREAD] = sqrt(sumDiff);
}
// Top-level kernel: one thread estimates one voxel's diffusion/kurtosis
// tensor parameters (theta) and Rician noise variance (SigmaSQ) by
// alternating EM updates of SigmaSQ, theta(1)=log(S0) and theta(2:end)
// until the voxel-level convergence criteria or iterLimitVoxel is hit.
// All double*/int*/bool* arguments are flat per-voxel work arrays indexed
// by THREAD; Z (design matrix, nParams x nDWIs) and Y (measured signals)
// are inputs, theta/SigmaSQ carry the initial guesses and the results.
// NOTE(review): nIterVoxel is incremented but never zeroed here --
// presumably the host initializes it; confirm against the caller.
__global__ void RicianMLE(
double *theta,
double *SigmaSQ,
double *Z,
double *fisherInformation,
double *fisherInformation_sym,
double *score,
double *DeltaTheta,
double *new_theta,
double *vv,
int *indx,
double *theta_old,
double *Y,
double *expZTheta,
double *ZTheta,
double *twotau,
double *expo,
double *EN,
double *b,
double *a,
double *c,
double *sumYSQ,
double *theta1_old,
double *SigmaSQ0,
double *SigmaSQ_old,
double *tmpdouble,
double *scaling,
double *expScaling,
double *loglikelihood,
double *loglikelihood_old,
double *regulatorLambda,
double *DeltaThetaScore,
double *norm1,
double *norm2,
unsigned int *nIterSigmaSQ,
unsigned int *nIterVoxel,
unsigned int *nIterS0,
unsigned int *nIterTheta,
unsigned int *nIterLoglikelihood,
bool *continueSigmaSQIteration,
bool *continueVoxelIteration,
bool *continueS0Iteration,
bool *continueThetaIteration,
bool *continueLoglikelihoodIteration,
bool *anyEN,
const double toleranceSigmaSQ,
const double toleranceS0,
const double toleranceTheta,
const double toleranceLoglikelihood,
const unsigned int iterLimitSigmaSQ,
const unsigned int iterLimitVoxel,
const unsigned int iterLimitS0,
const unsigned int iterLimitTheta,
const unsigned int iterLimitLoglikelihood,
const double regulatorLambda0,
const double regulatorRescaling,
const unsigned int nDWIs,
const unsigned int nParams,
const unsigned int nDeltaParams,
const unsigned int nVoxels) {
// Initial, work out which THREAD i.e. voxel we are computing
size_t const THREAD = calculateGlobalIndex();
if (THREAD >= nVoxels) {
return;
}
// First, optimize Rician loglikelihood w.r.t. SigmaSQ
calculateExpZTheta( expZTheta, theta, Z, nParams, nDWIs, THREAD);
calculateAB_1(a, b, Y, expZTheta, sumYSQ, nDWIs, THREAD);
iterateSigmaSQ(SigmaSQ, SigmaSQ0, tmpdouble, a, b, twotau, nIterSigmaSQ, iterLimitSigmaSQ, toleranceSigmaSQ, nDWIs, continueSigmaSQIteration, THREAD);
// Start voxel-wise optimization
continueVoxelIteration[THREAD] = true;
while (continueVoxelIteration[THREAD]) {
nIterVoxel[THREAD]++;
// Save initial theta and SigmaSQ to be used later to test if voxel optimization continues
SigmaSQ_old[THREAD] = SigmaSQ[THREAD];
for (int i = 0; i < nParams; i++) {
theta_old[THREAD * nParams + i] = theta[THREAD * nParams + i];
}
// Second, optimize w.r.t. S0 i.e. theta(1) with fixed theta(2:end) and SigmaSQ
// calcuateAB_2 updates a,b, expZTheta, and twotau variables
calculateAB_2(a, b, Y, Z, theta, SigmaSQ, expZTheta, twotau, nDWIs, nParams, THREAD);
// iterateS0 updates theta(1) and twotau variables
iterateS0(theta, theta1_old, SigmaSQ, a, b, twotau, nIterS0, iterLimitS0, toleranceS0, nDWIs, nParams, continueS0Iteration, THREAD);
// Third, optimize w.r.t. theta(2:end) with fixed theta(1) and SigmaSQ
// calculateEN updates conditional expectation EN and checks if any(EN > 0)
calculateEN(EN, twotau, nDWIs, anyEN, THREAD);
if (anyEN[THREAD]) {
// There is information to estimate tensor(s)
// calculateZTheta updates c and ZTheta variables
calculateZTheta(c, ZTheta, theta, SigmaSQ, Z, nDWIs, nParams, THREAD);
scaling[THREAD] = getMax(ZTheta, nDWIs, THREAD);
expScaling[THREAD] = exp(scaling[THREAD]);
// calculateLoglikelihood updates loglikelihood and expo variables
calculateLoglikelihood(loglikelihood, expo, ZTheta, scaling, expScaling, EN, nDWIs, THREAD);
initializeInformationMatrices(fisherInformation, fisherInformation_sym, nDeltaParams, THREAD);
iterateTheta(indx, vv, theta, ZTheta, c, fisherInformation, fisherInformation_sym, score, Z, EN, scaling, expScaling, expo, DeltaTheta, DeltaThetaScore, new_theta, loglikelihood, loglikelihood_old, regulatorLambda, regulatorLambda0, regulatorRescaling, nDWIs, nParams, nDeltaParams, nIterTheta, nIterLoglikelihood, iterLimitTheta, iterLimitLoglikelihood, toleranceTheta, toleranceLoglikelihood, continueThetaIteration, continueLoglikelihoodIteration, THREAD);
//goto THE_END;
}
else {
// There is no information for estimations
// Set theta(2:end) and information to zero
for (int i = 1; i < nParams; i++) {
theta[THREAD * nParams + i] = 0.0;
}
initializeInformationMatrices(fisherInformation, fisherInformation_sym, nDeltaParams, THREAD);
}
// Last, optimize w.r.t. SigmaSQ with fixed theta
calculateExpZTheta(expZTheta, theta, Z, nParams, nDWIs, THREAD);
calculateAB_1(a, b, Y, expZTheta, sumYSQ, nDWIs, THREAD);
iterateSigmaSQ(SigmaSQ, SigmaSQ0, tmpdouble, a, b, twotau, nIterSigmaSQ, iterLimitSigmaSQ, toleranceSigmaSQ, nDWIs, continueSigmaSQIteration, THREAD);
calculateNorms(norm1, norm2, theta, theta_old, nParams, THREAD);
// Continue while SigmaSQ or theta still changed appreciably and the
// iteration budget is not exhausted.
continueVoxelIteration[THREAD] =
(((fabs((SigmaSQ[THREAD] - SigmaSQ_old[THREAD]) / SigmaSQ_old[THREAD]) > toleranceSigmaSQ)
||
((norm2[THREAD] / norm1[THREAD]) > toleranceTheta))
&&
(nIterVoxel[THREAD] < iterLimitVoxel));
}
//THE_END:
} | /* Rician MLE diffusion and kurtosis tensor estimator by Viljami Sairanen (2016)
Based on algorithm in:
"Liu, Jia, Dario Gasbarra, and Juha Railavo.
"Fast Estimation of Diffusion Tensors under
Rician noise by the EM algorithm."
Journal of neuroscience methods 257 (2016) : 147 - 158" */
// to convert between single and double precision use following changes:
// double <-> double
// sqrt( <-> sqrt(
// fabs( <-> fabs(
// exp( <-> exp(
// log( <-> log(
#include <hip/hip_runtime.h>
#include <math.h>
__device__ size_t calculateGlobalIndex() {
// Which block are we?
size_t const globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
// Which THREAD are we within the block?
size_t const localthreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
// How big is each block?
size_t const threadsPerBlock = blockDim.x*blockDim.y;
// Which THREAD are we overall?
return localthreadIdx + globalBlockIndex*threadsPerBlock;
}
__device__ double getBesseli0(double x) {
double ax, ans, y;
ax = fabs(x);
if (ax < 3.75) {
y = x / 3.75;
y *= y;
ans = 1.0 + y*(3.5156229 + y*(3.0899424 + y*(1.2067492 +
y*(0.2659732 + y*(0.360768e-1 + y*0.45813e-2)))));
ans *= exp(-ax); // scale by exp(-abs(real(x))); see matlab help for besseli
}
else {
y = 3.75 / ax;
ans = (1.0 / sqrt(ax)) * // scale by exp(-abs(real(x))); see matlab help for besseli
(0.39894228 + y * (0.1328592e-1
+ y * (0.225319e-2 + y * (-0.157565e-2 + y * (0.916281e-2
+ y * (-0.2057706e-1 + y * (0.2635537e-1 + y * (-0.1647633e-1
+ y * (0.392377e-2)))))))));
}
return ans;
}
__device__ double getBesseli1(double x) {
double ax, ans, y;
ax = fabs(x);
if (ax < 3.75) {
y = x / 3.75;
y *= y;
ans = ax * (0.5 + y *(0.87890594 + y *(0.51498869 + y *(0.15084934
+ y * (0.2658733e-1 + y * (0.301532e-2 + y * 0.32411e-3))))));
ans *= exp(-ax); // scale by exp(-abs(real(x))); see matlab help for besseli
}
else {
y = 3.75 / ax;
ans = 0.2282967e-1 + y * (-0.2895312e-1 + y * (0.1787654e-1
- y * 0.420059e-2));
ans = 0.39894228 + y * (-0.3988024e-1 + y * (-0.362018e-2
+ y * (0.163801e-2 + y * (-0.1031555e-1 + y * ans))));
ans *= 1.0 / sqrt(ax); // scale by exp(-abs(real(x))); see matlab help for besseli
}
return x < 0.0 ? -ans : ans;
}
__device__ double getMax(
double *arr,
const unsigned int length,
size_t const THREAD) {
double ans;
ans = arr[THREAD * length];
for (int i = 1; i < length; i++) {
if (arr[THREAD * length + i] > ans) {
ans = arr[THREAD * length + i];
}
}
return ans;
}
/* (HIP port) Crout LU decomposition with implicit scaled partial pivoting
 * (after Numerical Recipes "ludcmp"), applied to this thread's n x n matrix
 * stored row-major at a[THREAD*n*n ...].
 * On exit: a holds the combined L/U factors, indx[THREAD*n ...] records
 * the row permutation, vv[THREAD*n ...] is scratch for per-row scaling.
 * A zero row / zero pivot is replaced by 1.0e-20 instead of failing. */
__device__ void LUdecomposition(double *a, int n, int *indx, double *vv, size_t const THREAD) {
    int i, imax = 0, j, k; // imax initialized defensively (was potentially read unset)
    double big, dum, sum, temp;
    // Pass 1: store the implicit scaling of each row (1 / largest |element|).
    for (i = 0; i < n; i++) {
        big = 0.0;
        for (j = 0; j < n; j++) {
            temp = fabs(a[THREAD * n * n + i * n + j]);
            if (temp >= big) {
                big = temp;
            }
        }
        if (big == 0.0) { // Singular matrix can't compute
            big = 1.0e-20;
        }
        vv[THREAD * n + i] = 1.0 / big;
    }
    // Pass 2: Crout's method, column by column.
    for (j = 0; j < n; j++) {
        for (i = 0; i < j; i++) {
            sum = a[THREAD * n * n + i * n + j];
            for (k = 0; k < i; k++) {
                sum -= a[THREAD * n * n + i * n + k] * a[THREAD * n * n + k * n + j];
            }
            a[THREAD * n * n + i * n + j] = sum;
        }
        // Search for the largest scaled pivot in column j.
        big = 0.0;
        for (i = j; i < n; i++) {
            sum = a[THREAD * n * n + i * n + j];
            for (k = 0; k < j; k++) {
                sum -= a[THREAD * n * n + i * n + k] * a[THREAD * n * n + k * n + j];
            }
            a[THREAD * n * n + i * n + j] = sum;
            dum = vv[THREAD * n + i] * fabs(sum);
            if (dum >= big) {
                big = dum;
                imax = i;
            }
        }
        if (j != imax) { // Interchange rows j and imax.
            for (k = 0; k < n; k++) {
                dum = a[THREAD * n * n + imax * n + k];
                // BUGFIX: this store previously went to a[imax*n + k] WITHOUT the
                // THREAD*n*n offset, so every thread with THREAD > 0 corrupted
                // thread 0's matrix instead of swapping its own rows.
                a[THREAD * n * n + imax * n + k] = a[THREAD * n * n + j * n + k];
                a[THREAD * n * n + j * n + k] = dum;
            }
            vv[THREAD * n + imax] = vv[THREAD * n + j];
        }
        indx[THREAD * n + j] = imax;
        if (a[THREAD * n * n + j * n + j] == 0.0) {
            a[THREAD * n * n + j * n + j] = 1.0e-20; // avoid division by a zero pivot
        }
        if (j != n) { // always true for j < n; last column's divide loop is empty anyway
            dum = 1.0 / a[THREAD * n * n + j * n + j];
            for (i = j + 1; i < n; i++) {
                a[THREAD * n * n + i * n + j] *= dum;
            }
        }
    }
}
__device__ void LUsubstitutions(double *a, int n, int *indx, double *b, size_t const THREAD) {
int i, ii = 0, ip, j;
double sum;
for (i = 0; i<n; i++) {
ip = indx[(THREAD * n) + i];
sum = b[(THREAD * n) + ip];
b[(THREAD * n) + ip] = b[(THREAD * n) + i];
if (ii != 0) {
for (j = ii - 1; j<i; j++) {
sum -= a[(THREAD * n * n) + (i * n) + j] * b[(THREAD * n) + j];
}
}
else if (sum != 0) {
ii = i + 1;
}
b[(THREAD * n) + i] = sum;
}
for (i = n - 1; i >= 0; i--) {
sum = b[(THREAD * n) + i];
for (j = i + 1; j<n; j++) {
sum -= a[(THREAD * n * n) + (i * n) + j] * b[(THREAD * n) + j];
}
b[(THREAD * n) + i] = sum / a[(THREAD * n * n) + (i * n) + i];
}
}
__device__ void CholeskyDecomposition(double *a, int n, double *p, size_t const THREAD) {
int i, j, k;
double sum;
for (i = 0; i < n; i++) {
for (j = i; j < n; j++) {
sum = a[(THREAD * n * n) + (i*n) + j];
for (k = i-1; k >= 0; k--) {
sum -= a[(THREAD * n * n) + (i*n) + k]
* a[(THREAD * n * n) + (j*n) + k];
}
if (i == j) {
if (sum <= 0.0) {
sum = 1.0e-20; // Cholesky decomposition failed
}
p[THREAD*n + i] = sqrt(sum);
}
else {
a[(THREAD*n*n) + (j*n) + i] = sum / p[THREAD*n + i];
}
}
}
}
/* (HIP port) Solves A x = b given the Cholesky factorization from
 * CholeskyDecomposition (L in the lower triangle of `a`, diagonal of L in
 * `p`): first forward-substitutes L y = b (y stored in x), then
 * back-substitutes L^T x = y. All vectors are per-thread slices of length n. */
__device__ void CholeskyBacksubstitution(double *a, int n, double *p, double *b, double *x, size_t const THREAD) {
    int i, k;
    double sum;
    for (i = 0; i < n; i++) { // Solve Ly=b, storing y in x
        sum = b[THREAD*n + i];
        for (k = i-1; k >= 0; k--) {
            sum -= a[(THREAD*n*n) + (i*n) + k] * x[THREAD*n + k];
        }
        x[THREAD*n + i] = sum / p[THREAD*n + i];
    }
    // BUGFIX: this loop started at i = n, which read p[THREAD*n + n] and
    // wrote x[THREAD*n + n] -- one element past this thread's slice, i.e.
    // into the NEXT thread's x[0] (out-of-bounds for the last thread).
    for (i = n - 1; i >= 0; i--) { // Solve L^(T)x=y
        sum = x[THREAD*n + i];
        for (k = i+1; k < n; k++) {
            sum -= a[(THREAD*n*n) + (k*n) + i] * x[THREAD*n + k];
        }
        x[THREAD*n + i] = sum / p[THREAD*n + i];
    }
}
__device__ void calculateExpZTheta(
double *expZTheta,
double *theta,
double *Z,
const unsigned int nParams,
const unsigned int nDWIs,
size_t const THREAD) {
for (int i = 0; i < nDWIs; i++) {
expZTheta[THREAD * nDWIs + i] = 0.0;
for (int j = 0; j < nParams; j++) {
expZTheta[THREAD * nDWIs + i] +=
Z[j * nDWIs + i] * theta[THREAD * nParams + j];
}
expZTheta[THREAD * nDWIs + i] = exp(expZTheta[THREAD * nDWIs + i]);
}
}
__device__ void calculateAB_1(
double *a,
double *b,
double *Y,
double *expZTheta,
double *sumYSQ,
const unsigned int nDWIs,
size_t const THREAD) {
a[THREAD] = sumYSQ[THREAD];
for (int i = 0; i < nDWIs; i++) {
a[THREAD] += expZTheta[THREAD * nDWIs + i] * expZTheta[THREAD * nDWIs + i];
b[THREAD * nDWIs + i] = Y[THREAD * nDWIs + i] * expZTheta[THREAD * nDWIs + i];
}
}
__device__ void calculateAB_2(
double *a,
double *b,
double *Y,
double *Z,
double *theta,
double *SigmaSQ,
double *expZTheta,
double *twotau,
const unsigned int nDWIs,
const unsigned int nParams,
size_t const THREAD) {
// Now indexing for i ranges [0, nDWIs-1] and j ranges [1, nParams] since first nParams is the theta(1)
a[THREAD] = 0.0;
for (int i = 0; i < nDWIs; i++) {
expZTheta[THREAD * nDWIs + i] = 0.0;
for (int j = 1; j < nParams; j++) {
expZTheta[THREAD * nDWIs + i] +=
Z[j * nDWIs + i] * theta[THREAD * nParams + j];
}
expZTheta[THREAD * nDWIs + i] = exp(expZTheta[THREAD * nDWIs + i]);
a[THREAD] += expZTheta[THREAD * nDWIs + i] * expZTheta[THREAD * nDWIs + i];
b[THREAD * nDWIs + i] = Y[THREAD * nDWIs + i] * expZTheta[THREAD * nDWIs + i];
twotau[THREAD * nDWIs + i] = b[THREAD * nDWIs + i] * exp(theta[THREAD * nParams+0]) / SigmaSQ[THREAD];
}
a[THREAD] = log(a[THREAD]);
}
__device__ void calculateEN(
double *EN,
double *twotau,
const unsigned int nDWIs,
bool *anyEN,
size_t const THREAD) {
anyEN[THREAD] = false;
for (int i = 0; i < nDWIs; i++) {
EN[THREAD * nDWIs + i] = 0.5 * twotau[THREAD * nDWIs + i] *
getBesseli1(twotau[THREAD * nDWIs + i]) /
getBesseli0(twotau[THREAD * nDWIs + i]);
if (EN[THREAD * nDWIs + i] > 0.0) {
anyEN[THREAD] = true;
}
}
}
__device__ void calculateZTheta(
double *c,
double *ZTheta,
double *theta,
double *SigmaSQ,
double *Z,
const unsigned int nDWIs,
const unsigned int nParams,
size_t const THREAD) {
// Now indexing for i ranges [0, nDWIs-1] and j ranges [1, nParams] since first nParams is the theta(1)
c[THREAD] = 2.0 * theta[THREAD * nParams+0] -
log(2.0 * SigmaSQ[THREAD]);
for (int i = 0; i < nDWIs; i++) {
ZTheta[THREAD * nDWIs + i] = 0.0;
for (int j = 1; j < nParams; j++) {
ZTheta[THREAD * nDWIs + i] +=
Z[j * nDWIs + i] * theta[THREAD * nParams + j];
}
ZTheta[THREAD * nDWIs + i] *= 2.0;
ZTheta[THREAD * nDWIs + i] += c[THREAD];
}
}
__device__ void calculateLoglikelihood(
double *loglikelihood,
double *expo,
double *ZTheta,
double *scaling,
double *expScaling,
double *EN,
const unsigned int nDWIs,
size_t const THREAD) {
loglikelihood[THREAD] = 0.0;
for (int i = 0; i < nDWIs; i++) {
expo[THREAD * nDWIs + i] = exp(ZTheta[THREAD * nDWIs + i] - scaling[THREAD]);
loglikelihood[THREAD] +=
EN[THREAD * nDWIs + i] * ZTheta[THREAD * nDWIs + i]
- expo[THREAD * nDWIs + i] * expScaling[THREAD];
}
}
//---------------------------------------------------------------------------
// initializeInformationMatrices: zero this thread's nDeltaParams x
// nDeltaParams Fisher-information matrix and its symmetrized copy.
//---------------------------------------------------------------------------
__device__ void initializeInformationMatrices(
    double *fisherInformation,
    double *fisherInformation_sym,
    const unsigned int nDeltaParams,
    size_t const THREAD) {
    const unsigned int nElems = nDeltaParams * nDeltaParams;
    const size_t base = THREAD * nElems;
    for (int k = 0; k < nElems; k++) {
        fisherInformation[base + k] = 0.0;
        fisherInformation_sym[base + k] = 0.0;
    }
}
//---------------------------------------------------------------------------
// iterateSigmaSQ: fixed-point (EM-style) iteration for the Rician noise
// variance SigmaSQ with theta held constant. Per iteration:
//   twotau[i] = b[i] / SigmaSQ
//   tmpdouble = sum_i twotau[i] * I1(twotau[i]) / I0(twotau[i])
//   SigmaSQ   = 0.5 * a / (nDWIs + tmpdouble)
// Stops when the iteration cap is reached or |SigmaSQ - SigmaSQ0| drops to
// toleranceSigmaSQ. Writes (all at this THREAD's slot/slice): SigmaSQ,
// SigmaSQ0, tmpdouble, twotau, nIterSigmaSQ, continueSigmaSQIteration.
//---------------------------------------------------------------------------
__device__ void iterateSigmaSQ(
    double *SigmaSQ,
    double *SigmaSQ0,
    double *tmpdouble,
    double *a,
    double *b,
    double *twotau,
    unsigned int *nIterSigmaSQ,
    unsigned int iterLimitSigmaSQ,
    const double toleranceSigmaSQ,
    const unsigned int nDWIs,
    bool *continueSigmaSQIteration,
    size_t const THREAD) {
    // Should be ok
    continueSigmaSQIteration[THREAD] = true;
    nIterSigmaSQ[THREAD] = 0;
    while (continueSigmaSQIteration[THREAD]) {
        (nIterSigmaSQ[THREAD])++;
        // Keep the previous estimate for the convergence test below.
        SigmaSQ0[THREAD] = SigmaSQ[THREAD];
        tmpdouble[THREAD] = 0.0;
        for (int i = 0; i < nDWIs; i++) {
            twotau[THREAD * nDWIs + i] = b[THREAD * nDWIs + i] / SigmaSQ[THREAD];
            // Accumulate the Bessel-ratio term of the EM update; the
            // exponential scalings of I1 and I0 cancel in the ratio.
            tmpdouble[THREAD] += twotau[THREAD * nDWIs + i] *
                getBesseli1(twotau[THREAD * nDWIs + i]) /
                getBesseli0(twotau[THREAD * nDWIs + i]);
        }
        SigmaSQ[THREAD] = 0.5 * a[THREAD] / ((double)(nDWIs) + tmpdouble[THREAD]);
        // Continue while under the cap AND the estimate is still moving.
        continueSigmaSQIteration[THREAD] =
            ((nIterSigmaSQ[THREAD] < iterLimitSigmaSQ)
            &&
            (fabs(SigmaSQ[THREAD] - SigmaSQ0[THREAD]) > toleranceSigmaSQ));
    }
}
//---------------------------------------------------------------------------
// iterateS0: fixed-point iteration for theta(1) (log S0) with theta(2:end)
// and SigmaSQ held constant. Per iteration:
//   theta(1)  = log( sum_i b[i] * I1(twotau[i]) / I0(twotau[i]) ) - a
//   twotau[i] = b[i] * exp(theta(1)) / SigmaSQ
// Stops at the iteration cap or when the relative change of theta(1) drops
// to toleranceS0. Writes: theta(1), theta1_old, twotau, nIterS0,
// continueS0Iteration (this THREAD's slot/slice).
//
// Fix: the original convergence test used the bare value
//   fabs((theta(1) - theta1_old) / theta1_old)
// as a boolean, so toleranceS0 was never applied and the loop effectively
// always ran to iterLimitS0. The relative change is now compared against
// toleranceS0, matching the pattern used by iterateSigmaSQ.
//---------------------------------------------------------------------------
__device__ void iterateS0(
    double *theta,
    double *theta1_old,
    double *SigmaSQ,
    double *a,
    double *b,
    double *twotau,
    unsigned int *nIterS0,
    unsigned int iterLimitS0,
    const double toleranceS0,
    const unsigned int nDWIs,
    const unsigned int nParams,
    bool *continueS0Iteration,
    size_t const THREAD) {
    continueS0Iteration[THREAD] = true;
    nIterS0[THREAD] = 0;
    while (continueS0Iteration[THREAD]) {
        nIterS0[THREAD]++;
        // Get initial theta(1) parameter
        theta1_old[THREAD] = theta[THREAD * nParams + 0];
        // Calculate new theta(1) parameter
        theta[THREAD * nParams + 0] = 0.0;
        for (int i = 0; i < nDWIs; i++) {
            theta[THREAD * nParams + 0] += (b[THREAD * nDWIs + i] *
                getBesseli1(twotau[THREAD * nDWIs + i]) /
                getBesseli0(twotau[THREAD * nDWIs + i]));
        }
        theta[THREAD * nParams + 0] = log(theta[THREAD * nParams + 0]) - a[THREAD];
        // Update twotau for the next iteration step
        for (int i = 0; i < nDWIs; i++) {
            twotau[THREAD * nDWIs + i] = b[THREAD * nDWIs + i] *
                exp(theta[THREAD * nParams + 0]) / SigmaSQ[THREAD];
        }
        // Continue while under the cap AND the relative change of theta(1)
        // still exceeds the requested tolerance.
        continueS0Iteration[THREAD] =
            ((nIterS0[THREAD] < iterLimitS0)
            &&
            (fabs((theta[THREAD * nParams + 0] - theta1_old[THREAD]) / theta1_old[THREAD]) > toleranceS0));
    }
}
//---------------------------------------------------------------------------
// calculateFisherInformation: score vector and (symmetrized, scaled) Fisher
// information for theta(2:end), accumulated over all DWIs:
//   score[j-1]  += 2 * Z[j][i] * (EN[i] - expo[i]*expScaling)
//   I[j-1][k-1] += 4 * Z[j][i] * Z[k][i] * expo[i]
// DeltaTheta is seeded with a copy of score; fisherInformation is finally
// overwritten by the scaled symmetric copy.
// NOTE(review): the "symmetrize" step adds element (j-1,k-1) to ITSELF
// rather than to its transpose (k-1,j-1). Because the accumulation above is
// symmetric in j and k the final result is the same in exact arithmetic,
// but the transpose was presumably intended -- confirm against the
// reference implementation before changing.
//---------------------------------------------------------------------------
__device__ void calculateFisherInformation(
    double *fisherInformation,
    double *fisherInformation_sym,
    double *Z,
    double *score,
    double *DeltaTheta,
    double *expo,
    double *EN,
    double *expScaling,
    const unsigned int nDWIs,
    const unsigned int nParams,
    const unsigned int nDeltaParams,
    size_t const THREAD) {
    for (int j = 1; j < nParams; j++) {
        score[THREAD * nDeltaParams + j - 1] = 0.0;
        for (int i = 0; i < nDWIs; i++) {
            score[THREAD * nDeltaParams + j - 1] +=
                2.0 * Z[j * nDWIs + i] * (EN[THREAD * nDWIs + i] -
                expo[THREAD * nDWIs + i] * expScaling[THREAD]);
            for (int k = 1; k < nParams; k++) { // range of j and k are [1 to nParams]
                fisherInformation[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)] +=
                    4.0 * Z[j * nDWIs + i] * Z[k * nDWIs + i] * expo[THREAD * nDWIs + i];
                // Symmetrize Fisher Information (see NOTE(review) above).
                fisherInformation_sym[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)] =
                    (fisherInformation[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)] +
                    fisherInformation[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)]) *
                    0.5 * expScaling[THREAD];
            }
        }
        // Seed the Newton step right-hand side with the score.
        DeltaTheta[THREAD * nDeltaParams + j - 1] = score[THREAD *nDeltaParams + j - 1];
    }
    // Make copy of symmetric Fisher information matrix
    for (int i = 0; i < nDeltaParams*nDeltaParams; i++) {
        fisherInformation[THREAD * nDeltaParams * nDeltaParams + i] = fisherInformation_sym[THREAD * nDeltaParams * nDeltaParams + i];
    }
}
//---------------------------------------------------------------------------
// iterateLoglikelihood: damped Newton inner loop for theta(2:end).
// Each pass solves (I_sym + lambda*Id) * DeltaTheta = score with a Cholesky
// factorization, proposes new_theta, recomputes ZTheta / scaling /
// loglikelihood, and increases lambda (by regulatorRescaling) until the new
// loglikelihood is finite and not worse than loglikelihood_old, or the
// iteration cap is hit.
// Parameters indx and toleranceLoglikelihood are currently unused; they
// belonged to the commented-out LU-decomposition path.
//---------------------------------------------------------------------------
__device__ void iterateLoglikelihood(
    int *indx,
    double *score,
    double *vv,
    double *DeltaTheta,
    double *Z,
    double *expo,
    double *theta,
    double *loglikelihood,
    double *loglikelihood_old,
    double *new_theta,
    double *regulatorLambda,
    double *fisherInformation,
    double *fisherInformation_sym,
    double *ZTheta,
    double *c,
    double *scaling,
    double *expScaling,
    double *EN,
    const unsigned int nDWIs,
    const unsigned int nParams,
    const unsigned int nDeltaParams,
    const double regulatorLambda0,
    const double regulatorRescaling,
    unsigned int *nIterLoglikelihood,
    const unsigned int iterLimitLoglikelihood,
    const double toleranceLoglikelihood,
    bool *continueLoglikelihoodIteration,
    size_t const THREAD) {
    nIterLoglikelihood[THREAD] = 0;
    continueLoglikelihoodIteration[THREAD] = true;
    regulatorLambda[THREAD] = regulatorLambda0;
    while (continueLoglikelihoodIteration[THREAD]) {
        nIterLoglikelihood[THREAD]++;
        //loglikelihood_old[THREAD] = loglikelihood[THREAD]; // loglikelihood_old is not supposed to be updated in this loop
        // Initialize DeltaTheta for LUdecomposition & substitutions
        // because X = I\score calculated using LUsubstitutions actually
        // replaces values in score and we don't want to loose that information
        // so we have to save score into DeltaTheta variable
        for (int j = 1; j < nParams; j++) {
            DeltaTheta[THREAD * nDeltaParams + j - 1] = score[THREAD *nDeltaParams + j - 1];
        }
        // Regularize Fisher information matrix with lambda
        // (Levenberg-style damping added to the diagonal only).
        for (int i = 0; i < nDeltaParams; i++) {
            fisherInformation[THREAD * nDeltaParams*nDeltaParams + i*nDeltaParams + i] =
                fisherInformation_sym[THREAD * nDeltaParams*nDeltaParams + i*nDeltaParams + i]
                + regulatorLambda[THREAD];
        }
        // Update regulatorLambda
        regulatorLambda[THREAD] *= regulatorRescaling;
        //LUdecomposition(fisherInformation, nDeltaParams, indx, vv, THREAD);
        //LUsubstitutions(fisherInformation, nDeltaParams, indx, DeltaTheta, THREAD);
        // Solve the damped normal equations for DeltaTheta.
        CholeskyDecomposition(fisherInformation, nDeltaParams, vv, THREAD);
        CholeskyBacksubstitution(fisherInformation, nDeltaParams, vv, score, DeltaTheta, THREAD);
        //goto THE_END_LOGLIKELIHOOD;
        // Calculate new theta(2:end)
        for (int i = 1; i < nParams; i++) {
            new_theta[THREAD * nDeltaParams + i - 1] =
                theta[THREAD * nParams + i]
                + DeltaTheta[THREAD * nDeltaParams + i - 1];
        }
        // Calculate ZTheta based on new_theta
        for (int i = 0; i < nDWIs; i++) {
            ZTheta[THREAD * nDWIs + i] = 0.0;
            for (int j = 1; j < nParams; j++) {
                ZTheta[THREAD * nDWIs + i] +=
                    Z[j* nDWIs + i] * new_theta[THREAD * nDeltaParams + j - 1];
            }
            ZTheta[THREAD * nDWIs + i] *= 2.0;
            ZTheta[THREAD * nDWIs + i] += c[THREAD]; // c is based on theta(1) and sigmasq that are constant in this loop
        }
        // Rescale by the max of ZTheta to keep the exponentials finite.
        scaling[THREAD] = getMax(ZTheta, nDWIs, THREAD);
        expScaling[THREAD] = exp(scaling[THREAD]);
        // Calculate new loglikelihood
        // calculateLoglikelihood updates loglikelihood and expo variables
        calculateLoglikelihood(loglikelihood, expo, ZTheta, scaling, expScaling, EN, nDWIs, THREAD);
        // Check if new loglikelihood is NaN, if so more regulation is needed
        // (f != f) is true only if f is NaN (IEEE standard)
        if (loglikelihood[THREAD] != loglikelihood[THREAD]) {
            // loglikelihood is NaN, check only iterations
            continueLoglikelihoodIteration[THREAD] = (nIterLoglikelihood[THREAD] < iterLimitLoglikelihood);
        }
        else {
            // Keep damping harder while the step still decreases the
            // loglikelihood relative to the accepted value.
            continueLoglikelihoodIteration[THREAD] =
                ((loglikelihood[THREAD] < loglikelihood_old[THREAD])
                &&
                (nIterLoglikelihood[THREAD] < iterLimitLoglikelihood));
        }
    }
    //THE_END_LOGLIKELIHOOD:
}
//---------------------------------------------------------------------------
// iterateTheta: outer Newton-type iteration for theta(2:end) with theta(1)
// and SigmaSQ fixed. Each pass rebuilds the score and Fisher information,
// runs the damped inner loop iterateLoglikelihood, and accepts the proposed
// new_theta unless the new loglikelihood is NaN.
// NOTE(review): loglikelihood_old is refreshed right BEFORE the convergence
// test, so the (loglikelihood - loglikelihood_old) term is always 0 and the
// test reduces to DeltaThetaScore > toleranceTheta. Presumably the update
// was meant to come after the test -- confirm against the reference.
//---------------------------------------------------------------------------
__device__ void iterateTheta(
    int *indx,
    double *vv,
    double *theta,
    double *ZTheta,
    double *c,
    double *fisherInformation,
    double *fisherInformation_sym,
    double *score,
    double *Z,
    double *EN,
    double *scaling,
    double *expScaling,
    double *expo,
    double *DeltaTheta,
    double *DeltaThetaScore,
    double *new_theta,
    double *loglikelihood,
    double *loglikelihood_old,
    double *regulatorLambda,
    const double regulatorLambda0,
    const double regulatorRescaling,
    const unsigned int nDWIs,
    const unsigned int nParams,
    const unsigned int nDeltaParams,
    unsigned int *nIterTheta,
    unsigned int *nIterLoglikelihood,
    const unsigned int iterLimitTheta,
    const unsigned int iterLimitLoglikelihood,
    const double toleranceTheta,
    const double toleranceLoglikelihood,
    bool *continueThetaIteration,
    bool *continueLoglikelihoodIteration,
    size_t const THREAD) {
    // Now indexing for i ranges [0, nDWIs-1] and j ranges [1, nParams] since first nParams is the theta(1)
    continueThetaIteration[THREAD] = true;
    nIterTheta[THREAD] = 0;
    loglikelihood_old[THREAD] = loglikelihood[THREAD];
    while (continueThetaIteration[THREAD]) {
        nIterTheta[THREAD]++;
        // Rebuild score vector and Fisher information at the current theta.
        calculateFisherInformation(fisherInformation, fisherInformation_sym, Z, score, DeltaTheta, expo, EN, expScaling, nDWIs, nParams, nDeltaParams, THREAD);
        // Optimize loglikelihood
        iterateLoglikelihood(indx, score, vv, DeltaTheta, Z, expo, theta, loglikelihood, loglikelihood_old, new_theta, regulatorLambda, fisherInformation, fisherInformation_sym, ZTheta, c, scaling, expScaling, EN, nDWIs, nParams, nDeltaParams, regulatorLambda0, regulatorRescaling, nIterLoglikelihood, iterLimitLoglikelihood, toleranceLoglikelihood, continueLoglikelihoodIteration, THREAD);
        //goto THE_END_THETA;
        // Directional derivative DeltaTheta . score, used as the step-size
        // convergence measure below.
        DeltaThetaScore[THREAD] = 0.0;
        for (int i = 0; i < nDeltaParams; i++) {
            DeltaThetaScore[THREAD] += DeltaTheta[THREAD * nDeltaParams + i]
                * score[THREAD * nDeltaParams + i];
        }
        // Check if new loglikelihood is NaN, if not
        // update theta(2:end) and loglikelihood_old
        if (loglikelihood[THREAD] != loglikelihood[THREAD]) {
            // NaN, don't update variables
            continueThetaIteration[THREAD] = (nIterTheta[THREAD] < iterLimitTheta);
        } else {
            for (int i = 1; i < nParams; i++) {
                theta[THREAD * nParams + i] = new_theta[THREAD * nDeltaParams + i - 1];
            }
            loglikelihood_old[THREAD] = loglikelihood[THREAD];
            continueThetaIteration[THREAD] =
                (((DeltaThetaScore[THREAD] > toleranceTheta)
                ||
                ((loglikelihood[THREAD] - loglikelihood_old[THREAD]) > toleranceLoglikelihood))
                &&
                (nIterTheta[THREAD] < iterLimitTheta));
        }
    }
    //THE_END_THETA:
}
//---------------------------------------------------------------------------
// calculateNorms: Euclidean norms used by the voxel-level convergence test.
//   norm1 = || theta_old ||_2 ,  norm2 = || theta - theta_old ||_2
// over this THREAD's nParams-long parameter slice.
//---------------------------------------------------------------------------
__device__ void calculateNorms(
    double *norm1,
    double *norm2,
    double *theta,
    double *theta_old,
    const unsigned int nParams,
    size_t const THREAD) {
    const size_t base = THREAD * nParams;
    double sumOld = 0.0;
    double sumDiff = 0.0;
    for (int k = 0; k < nParams; k++) {
        const double prev = theta_old[base + k];
        const double diff = theta[base + k] - prev;
        sumOld += prev * prev;
        sumDiff += diff * diff;
    }
    norm1[THREAD] = sqrt(sumOld);
    norm2[THREAD] = sqrt(sumDiff);
}
//---------------------------------------------------------------------------
// RicianMLE: one thread per voxel. Maximum-likelihood estimation of the
// tensor parameters theta and the Rician noise variance SigmaSQ using
// nested EM-style iterations:
//   1. optimize SigmaSQ with theta fixed           (iterateSigmaSQ)
//   2. voxel loop:
//        a. optimize theta(1) (log S0)             (iterateS0)
//        b. optimize theta(2:end) via damped Newton steps on the
//           regularized Fisher information         (iterateTheta),
//           skipped (theta(2:end) zeroed) when no EN entry is positive
//        c. re-optimize SigmaSQ                    (iterateSigmaSQ)
//      until neither SigmaSQ nor theta changes beyond tolerance, or
//      iterLimitVoxel is reached.
// All working buffers are caller-allocated with one slot/slice per voxel.
// NOTE(review): nIterVoxel[THREAD] is incremented without being zeroed in
// this kernel -- presumably the host zero-initializes it; confirm.
//---------------------------------------------------------------------------
__global__ void RicianMLE(
    double *theta,
    double *SigmaSQ,
    double *Z,
    double *fisherInformation,
    double *fisherInformation_sym,
    double *score,
    double *DeltaTheta,
    double *new_theta,
    double *vv,
    int *indx,
    double *theta_old,
    double *Y,
    double *expZTheta,
    double *ZTheta,
    double *twotau,
    double *expo,
    double *EN,
    double *b,
    double *a,
    double *c,
    double *sumYSQ,
    double *theta1_old,
    double *SigmaSQ0,
    double *SigmaSQ_old,
    double *tmpdouble,
    double *scaling,
    double *expScaling,
    double *loglikelihood,
    double *loglikelihood_old,
    double *regulatorLambda,
    double *DeltaThetaScore,
    double *norm1,
    double *norm2,
    unsigned int *nIterSigmaSQ,
    unsigned int *nIterVoxel,
    unsigned int *nIterS0,
    unsigned int *nIterTheta,
    unsigned int *nIterLoglikelihood,
    bool *continueSigmaSQIteration,
    bool *continueVoxelIteration,
    bool *continueS0Iteration,
    bool *continueThetaIteration,
    bool *continueLoglikelihoodIteration,
    bool *anyEN,
    const double toleranceSigmaSQ,
    const double toleranceS0,
    const double toleranceTheta,
    const double toleranceLoglikelihood,
    const unsigned int iterLimitSigmaSQ,
    const unsigned int iterLimitVoxel,
    const unsigned int iterLimitS0,
    const unsigned int iterLimitTheta,
    const unsigned int iterLimitLoglikelihood,
    const double regulatorLambda0,
    const double regulatorRescaling,
    const unsigned int nDWIs,
    const unsigned int nParams,
    const unsigned int nDeltaParams,
    const unsigned int nVoxels) {
    // Initial, work out which THREAD i.e. voxel we are computing
    size_t const THREAD = calculateGlobalIndex();
    if (THREAD >= nVoxels) {
        return;
    }
    // First, optimize Rician loglikelihood w.r.t. SigmaSQ
    calculateExpZTheta( expZTheta, theta, Z, nParams, nDWIs, THREAD);
    calculateAB_1(a, b, Y, expZTheta, sumYSQ, nDWIs, THREAD);
    iterateSigmaSQ(SigmaSQ, SigmaSQ0, tmpdouble, a, b, twotau, nIterSigmaSQ, iterLimitSigmaSQ, toleranceSigmaSQ, nDWIs, continueSigmaSQIteration, THREAD);
    // Start voxel-wise optimization
    continueVoxelIteration[THREAD] = true;
    while (continueVoxelIteration[THREAD]) {
        nIterVoxel[THREAD]++;
        // Save initial theta and SigmaSQ to be used later to test if voxel optimization continues
        SigmaSQ_old[THREAD] = SigmaSQ[THREAD];
        for (int i = 0; i < nParams; i++) {
            theta_old[THREAD * nParams + i] = theta[THREAD * nParams + i];
        }
        // Second, optimize w.r.t. S0 i.e. theta(1) with fixed theta(2:end) and SigmaSQ
        // calcuateAB_2 updates a,b, expZTheta, and twotau variables
        calculateAB_2(a, b, Y, Z, theta, SigmaSQ, expZTheta, twotau, nDWIs, nParams, THREAD);
        // iterateS0 updates theta(1) and twotau variables
        iterateS0(theta, theta1_old, SigmaSQ, a, b, twotau, nIterS0, iterLimitS0, toleranceS0, nDWIs, nParams, continueS0Iteration, THREAD);
        // Third, optimize w.r.t. theta(2:end) with fixed theta(1) and SigmaSQ
        // calculateEN updates conditional expectation EN and checks if any(EN > 0)
        calculateEN(EN, twotau, nDWIs, anyEN, THREAD);
        if (anyEN[THREAD]) {
            // There is information to estimate tensor(s)
            // calculateZTheta updates c and ZTheta variables
            calculateZTheta(c, ZTheta, theta, SigmaSQ, Z, nDWIs, nParams, THREAD);
            // Rescale by max(ZTheta) so the exponentials stay finite.
            scaling[THREAD] = getMax(ZTheta, nDWIs, THREAD);
            expScaling[THREAD] = exp(scaling[THREAD]);
            // calculateLoglikelihood updates loglikelihood and expo variables
            calculateLoglikelihood(loglikelihood, expo, ZTheta, scaling, expScaling, EN, nDWIs, THREAD);
            initializeInformationMatrices(fisherInformation, fisherInformation_sym, nDeltaParams, THREAD);
            iterateTheta(indx, vv, theta, ZTheta, c, fisherInformation, fisherInformation_sym, score, Z, EN, scaling, expScaling, expo, DeltaTheta, DeltaThetaScore, new_theta, loglikelihood, loglikelihood_old, regulatorLambda, regulatorLambda0, regulatorRescaling, nDWIs, nParams, nDeltaParams, nIterTheta, nIterLoglikelihood, iterLimitTheta, iterLimitLoglikelihood, toleranceTheta, toleranceLoglikelihood, continueThetaIteration, continueLoglikelihoodIteration, THREAD);
            //goto THE_END;
        }
        else {
            // There is no information for estimations
            // Set theta(2:end) and information to zero
            for (int i = 1; i < nParams; i++) {
                theta[THREAD * nParams + i] = 0.0;
            }
            initializeInformationMatrices(fisherInformation, fisherInformation_sym, nDeltaParams, THREAD);
        }
        // Last, optimize w.r.t. SigmaSQ with fixed theta
        calculateExpZTheta(expZTheta, theta, Z, nParams, nDWIs, THREAD);
        calculateAB_1(a, b, Y, expZTheta, sumYSQ, nDWIs, THREAD);
        iterateSigmaSQ(SigmaSQ, SigmaSQ0, tmpdouble, a, b, twotau, nIterSigmaSQ, iterLimitSigmaSQ, toleranceSigmaSQ, nDWIs, continueSigmaSQIteration, THREAD);
        calculateNorms(norm1, norm2, theta, theta_old, nParams, THREAD);
        // Keep iterating while either SigmaSQ or theta moved more than its
        // tolerance AND the voxel iteration cap has not been reached.
        continueVoxelIteration[THREAD] =
            (((fabs((SigmaSQ[THREAD] - SigmaSQ_old[THREAD]) / SigmaSQ_old[THREAD]) > toleranceSigmaSQ)
            ||
            ((norm2[THREAD] / norm1[THREAD]) > toleranceTheta))
            &&
            (nIterVoxel[THREAD] < iterLimitVoxel));
    }
    //THE_END:
}
/* Rician MLE diffusion and kurtosis tensor estimator by Viljami Sairanen (2016)
Based on algorithm in:
"Liu, Jia, Dario Gasbarra, and Juha Railavo.
"Fast Estimation of Diffusion Tensors under
Rician noise by the EM algorithm."
Journal of neuroscience methods 257 (2016) : 147 - 158" */
// to convert between single and double precision use the following changes:
// float <-> double
// sqrtf( <-> sqrt(
// fabsf( <-> fabs(
// expf( <-> exp(
// logf( <-> log(
#include <hip/hip_runtime.h>
#include <math.h>
__device__ size_t calculateGlobalIndex() {
// Which block are we?
size_t const globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
// Which THREAD are we within the block?
size_t const localthreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
// How big is each block?
size_t const threadsPerBlock = blockDim.x*blockDim.y;
// Which THREAD are we overall?
return localthreadIdx + globalBlockIndex*threadsPerBlock;
}
//---------------------------------------------------------------------------
// getBesseli0: exponentially scaled modified Bessel function I0.
// Returns exp(-|x|) * I0(x) (cf. MATLAB besseli's scaling option), using
// the Abramowitz & Stegun polynomial fits: small-argument series for
// |x| < 3.75, asymptotic expansion otherwise.
//---------------------------------------------------------------------------
__device__ double getBesseli0(double x) {
    double absX = fabs(x);
    double result;
    if (absX < 3.75) {
        // Polynomial in t = (x/3.75)^2, then apply the exp(-|x|) scaling.
        double t = x / 3.75;
        t *= t;
        result = 1.0 + t*(3.5156229 + t*(3.0899424 + t*(1.2067492 +
            t*(0.2659732 + t*(0.360768e-1 + t*0.45813e-2)))));
        result *= exp(-absX);
    }
    else {
        // Asymptotic branch: the exp(|x|) growth of I0 cancels against the
        // requested exp(-|x|) scaling, leaving only the 1/sqrt(|x|) factor.
        double t = 3.75 / absX;
        result = (1.0 / sqrt(absX)) *
            (0.39894228 + t * (0.1328592e-1
            + t * (0.225319e-2 + t * (-0.157565e-2 + t * (0.916281e-2
            + t * (-0.2057706e-1 + t * (0.2635537e-1 + t * (-0.1647633e-1
            + t * (0.392377e-2)))))))));
    }
    return result;
}
//---------------------------------------------------------------------------
// getBesseli1: exponentially scaled modified Bessel function I1.
// Returns exp(-|x|) * I1(x); odd in x (the sign of x is restored at the
// end). Uses the Abramowitz & Stegun polynomial fits: small-argument
// series for |x| < 3.75, asymptotic expansion otherwise.
//---------------------------------------------------------------------------
__device__ double getBesseli1(double x) {
    double absX = fabs(x);
    double result;
    if (absX < 3.75) {
        // Polynomial in t = (x/3.75)^2, then apply the exp(-|x|) scaling.
        double t = x / 3.75;
        t *= t;
        result = absX * (0.5 + t *(0.87890594 + t *(0.51498869 + t *(0.15084934
            + t * (0.2658733e-1 + t * (0.301532e-2 + t * 0.32411e-3))))));
        result *= exp(-absX);
    }
    else {
        double t = 3.75 / absX;
        // Tail of the asymptotic polynomial ...
        result = 0.2282967e-1 + t * (-0.2895312e-1 + t * (0.1787654e-1
            - t * 0.420059e-2));
        // ... folded into the full expansion; only 1/sqrt(|x|) remains
        // after the exponential scaling cancels.
        result = 0.39894228 + t * (-0.3988024e-1 + t * (-0.362018e-2
            + t * (0.163801e-2 + t * (-0.1031555e-1 + t * result))));
        result *= 1.0 / sqrt(absX);
    }
    return x < 0.0 ? -result : result;
}
//---------------------------------------------------------------------------
// getMax: maximum element of this THREAD's slice
// arr[THREAD*length .. THREAD*length + length - 1]. Assumes length >= 1.
//---------------------------------------------------------------------------
__device__ double getMax(
    double *arr,
    const unsigned int length,
    size_t const THREAD) {
    const size_t base = THREAD * length;
    double best = arr[base];
    for (int k = 1; k < length; k++) {
        if (arr[base + k] > best) {
            best = arr[base + k];
        }
    }
    return best;
}
//---------------------------------------------------------------------------
// LUdecomposition: Crout LU decomposition with implicitly scaled partial
// pivoting (Numerical Recipes ludcmp), operating in place on this THREAD's
// n x n matrix at a[THREAD*n*n]. Row-permutation indices are written to
// indx[THREAD*n ..]; vv[THREAD*n ..] is scratch for per-row scale factors.
// Singular pivots are replaced by 1.0e-20 instead of aborting.
//
// Fix: one assignment of the row swap wrote through a[imax*n + k],
// missing the THREAD*n*n base offset. For THREAD > 0 this corrupted
// thread 0's matrix and left this thread's swap half-done. The offset is
// now applied, matching every other access in the routine.
//---------------------------------------------------------------------------
__device__ void LUdecomposition(double *a, int n, int *indx, double *vv, size_t const THREAD) {
    int i, imax, j, k;
    double big, dum, sum, temp;
    // Per-row scale factors (largest |element| of each row) for the
    // implicit pivoting criterion.
    for (i = 0; i<n; i++) {
        big = 0.0;
        for (j = 0; j<n; j++) {
            temp = fabs(a[THREAD * n * n + i*n + j]);
            if (temp >= big) {
                big = temp;
            }
        }
        if (big == 0.0) { // Singular matrix can't compute
            big = 1.0e-20;
        }
        vv[THREAD * n + i] = 1.0 / big;
    }
    for (j = 0; j<n; j++) {
        for (i = 0; i<j; i++) {
            sum = a[THREAD * n * n + i*n + j];
            for (k = 0; k<i; k++) {
                sum -= a[THREAD * n * n + i*n + k] * a[THREAD * n * n + k*n + j];
            }
            a[THREAD * n * n + i*n + j] = sum;
        }
        // Search for the largest scaled pivot in column j.
        big = 0.0;
        for (i = j; i<n; i++) {
            sum = a[THREAD * n * n + i*n + j];
            for (k = 0; k<j; k++) {
                sum -= a[THREAD * n * n + i*n + k] * a[THREAD * n * n + k*n + j];
            }
            a[THREAD * n * n + i*n + j] = sum;
            dum = vv[THREAD * n + i] * fabs(sum);
            if (dum >= big) {
                big = dum;
                imax = i;
            }
        }
        if (j != imax) {
            // Swap rows j and imax of THIS thread's matrix.
            for (k = 0; k<n; k++) {
                dum = a[THREAD * n * n + imax*n + k];
                a[THREAD * n * n + imax*n + k] = a[THREAD * n * n + j*n + k]; // was a[imax*n + k]: missing THREAD offset
                a[THREAD * n * n + j*n + k] = dum;
            }
            vv[THREAD * n + imax] = vv[THREAD * n + j];
        }
        indx[THREAD * n + j] = imax;
        if (a[THREAD * n * n + j*n + j] == 0.0) {
            a[THREAD * n * n + j*n + j] = 1.0e-20; // avoid dividing by an exact zero pivot
        }
        if (j != n) { // always true for j < n; vestige of the 1-indexed NR original
            dum = 1.0 / a[THREAD * n * n + j*n + j];
            for (i = j + 1; i<n; i++) {
                a[THREAD * n * n + i*n + j] *= dum;
            }
        }
    }
}
//---------------------------------------------------------------------------
// LUsubstitutions: solve A*x = b for this THREAD's system, given the LU
// factorization and row permutation produced by LUdecomposition
// (Numerical Recipes lubksb). b is overwritten with the solution. The
// firstNonZero bookkeeping skips leading zeros of b in the forward pass.
//---------------------------------------------------------------------------
__device__ void LUsubstitutions(double *a, int n, int *indx, double *b, size_t const THREAD) {
    const size_t mat = THREAD * n * n;
    const size_t vec = THREAD * n;
    int firstNonZero = 0; // 1-based index of first non-zero rhs entry; 0 = none seen yet
    double sum;
    // Forward substitution, undoing the row permutation on the fly.
    for (int i = 0; i < n; i++) {
        const int ip = indx[vec + i];
        sum = b[vec + ip];
        b[vec + ip] = b[vec + i];
        if (firstNonZero != 0) {
            for (int j = firstNonZero - 1; j < i; j++) {
                sum -= a[mat + (i * n) + j] * b[vec + j];
            }
        }
        else if (sum != 0) {
            firstNonZero = i + 1;
        }
        b[vec + i] = sum;
    }
    // Back substitution against the upper triangle.
    for (int i = n - 1; i >= 0; i--) {
        sum = b[vec + i];
        for (int j = i + 1; j < n; j++) {
            sum -= a[mat + (i * n) + j] * b[vec + j];
        }
        b[vec + i] = sum / a[mat + (i * n) + i];
    }
}
//---------------------------------------------------------------------------
// CholeskyDecomposition: in-place Cholesky factorization A = L*L^T of this
// THREAD's n x n matrix (Numerical Recipes choldc). The factor L is stored
// in the strict lower triangle of a; its diagonal goes to p[THREAD*n ..].
// The upper triangle of a is left untouched. A non-positive pivot is
// clamped to 1.0e-20 instead of signalling failure.
//---------------------------------------------------------------------------
__device__ void CholeskyDecomposition(double *a, int n, double *p, size_t const THREAD) {
    const size_t mat = THREAD * n * n;
    const size_t vec = THREAD * n;
    for (int row = 0; row < n; row++) {
        for (int col = row; col < n; col++) {
            double sum = a[mat + (row * n) + col];
            for (int k = row - 1; k >= 0; k--) {
                sum -= a[mat + (row * n) + k]
                    * a[mat + (col * n) + k];
            }
            if (row == col) {
                if (sum <= 0.0) {
                    sum = 1.0e-20; // Cholesky decomposition failed; clamp pivot
                }
                p[vec + row] = sqrt(sum);
            }
            else {
                a[mat + (col * n) + row] = sum / p[vec + row];
            }
        }
    }
}
__device__ void CholeskyBacksubstitution(double *a, int n, double *p, double *b, double *x, size_t const THREAD) {
int i, k;
double sum;
for (i = 0; i < n; i++) { // Solve Ly=b, storing y in x
sum = b[THREAD*n + i];
for (k = i-1; k >= 0; k--) {
sum -= a[(THREAD*n*n) + (i*n) + k] * x[THREAD*n + k];
}
x[THREAD*n + i] = sum / p[THREAD*n + i];
}
for (i = n; i >= 0; i--) { // Solve L^(T)x=y
sum = x[THREAD*n + i];
for (k = i+1; k < n; k++) {
sum -= a[(THREAD*n*n) + (k*n) + i] * x[THREAD*n + k];
}
x[THREAD*n + i] = sum / p[THREAD*n + i];
}
}
//---------------------------------------------------------------------------
// calculateExpZTheta: predicted signal exp(Z*theta) per DWI measurement,
// using ALL nParams columns of Z (including the theta(1) column).
// Writes: expZTheta[THREAD*nDWIs .. +nDWIs-1].
//---------------------------------------------------------------------------
__device__ void calculateExpZTheta(
    double *expZTheta,
    double *theta,
    double *Z,
    const unsigned int nParams,
    const unsigned int nDWIs,
    size_t const THREAD) {
    const size_t base = THREAD * nDWIs;
    for (int m = 0; m < nDWIs; m++) {
        double dot = 0.0;
        for (int p = 0; p < nParams; p++) {
            dot += Z[p * nDWIs + m] * theta[THREAD * nParams + p];
        }
        expZTheta[base + m] = exp(dot);
    }
}
//---------------------------------------------------------------------------
// calculateAB_1: EM working quantities with the full model prediction.
//   a    = sumYSQ + sum_i expZTheta[i]^2
//   b[i] = Y[i] * expZTheta[i]
// Writes: a[THREAD], b[THREAD*nDWIs .. +nDWIs-1].
//---------------------------------------------------------------------------
__device__ void calculateAB_1(
    double *a,
    double *b,
    double *Y,
    double *expZTheta,
    double *sumYSQ,
    const unsigned int nDWIs,
    size_t const THREAD) {
    const size_t base = THREAD * nDWIs;
    double acc = sumYSQ[THREAD];
    for (int k = 0; k < nDWIs; k++) {
        const double s = expZTheta[base + k];
        acc += s * s;
        b[base + k] = Y[base + k] * s;
    }
    a[THREAD] = acc;
}
//---------------------------------------------------------------------------
// calculateAB_2: EM working quantities with theta(1) factored out.
//   expZTheta[i] = exp( sum_{j=1..nParams-1} Z[j][i]*theta[j] )  (no theta(1))
//   a            = log( sum_i expZTheta[i]^2 )
//   b[i]         = Y[i] * expZTheta[i]
//   twotau[i]    = b[i] * exp(theta(1)) / SigmaSQ
// Writes: a[THREAD] and this THREAD's slices of expZTheta, b, twotau.
//---------------------------------------------------------------------------
__device__ void calculateAB_2(
    double *a,
    double *b,
    double *Y,
    double *Z,
    double *theta,
    double *SigmaSQ,
    double *expZTheta,
    double *twotau,
    const unsigned int nDWIs,
    const unsigned int nParams,
    size_t const THREAD) {
    const size_t base = THREAD * nDWIs;
    double sumSq = 0.0;
    for (int m = 0; m < nDWIs; m++) {
        // Linear predictor without the theta(1) column (index j == 0).
        double dot = 0.0;
        for (int p = 1; p < nParams; p++) {
            dot += Z[p * nDWIs + m] * theta[THREAD * nParams + p];
        }
        const double s = exp(dot);
        expZTheta[base + m] = s;
        sumSq += s * s;
        b[base + m] = Y[base + m] * s;
        twotau[base + m] = b[base + m] * exp(theta[THREAD * nParams + 0]) / SigmaSQ[THREAD];
    }
    a[THREAD] = log(sumSq);
}
//---------------------------------------------------------------------------
// calculateEN: conditional expectation E[N] for each DWI measurement,
//   EN[i] = 0.5 * twotau[i] * I1(twotau[i]) / I0(twotau[i]);
// the exponential scalings of the Bessel helpers cancel in the ratio.
// anyEN[THREAD] reports whether any EN entry came out positive.
//---------------------------------------------------------------------------
__device__ void calculateEN(
    double *EN,
    double *twotau,
    const unsigned int nDWIs,
    bool *anyEN,
    size_t const THREAD) {
    const size_t offset = THREAD * nDWIs;
    bool positiveSeen = false;
    for (int idx = 0; idx < nDWIs; idx++) {
        const double tau2 = twotau[offset + idx];
        EN[offset + idx] = 0.5 * tau2 * getBesseli1(tau2) / getBesseli0(tau2);
        if (EN[offset + idx] > 0.0) {
            positiveSeen = true;
        }
    }
    anyEN[THREAD] = positiveSeen;
}
//---------------------------------------------------------------------------
// calculateZTheta: linear predictor of the Rician log-likelihood.
//   c[THREAD] = 2*theta(1) - log(2*SigmaSQ)
//   ZTheta[i] = 2 * sum_{j=1..nParams-1} Z[j][i]*theta[j] + c[THREAD]
// Column j == 0 of Z (the theta(1) column) is excluded; theta(1) enters
// only through c. Writes: c[THREAD], ZTheta[THREAD*nDWIs .. +nDWIs-1].
//---------------------------------------------------------------------------
__device__ void calculateZTheta(
    double *c,
    double *ZTheta,
    double *theta,
    double *SigmaSQ,
    double *Z,
    const unsigned int nDWIs,
    const unsigned int nParams,
    size_t const THREAD) {
    const double shift = 2.0 * theta[THREAD * nParams + 0] -
        log(2.0 * SigmaSQ[THREAD]);
    c[THREAD] = shift;
    for (int idx = 0; idx < nDWIs; idx++) {
        double dot = 0.0;
        for (int par = 1; par < nParams; par++) {
            dot += Z[par * nDWIs + idx] * theta[THREAD * nParams + par];
        }
        ZTheta[THREAD * nDWIs + idx] = dot * 2.0 + shift;
    }
}
//---------------------------------------------------------------------------
// calculateLoglikelihood: per-voxel log-likelihood accumulation.
//   expo[i]       = exp(ZTheta[i] - scaling)  (scaling guards overflow)
//   loglikelihood = sum_i EN[i]*ZTheta[i] - expo[i]*expScaling
// Writes: loglikelihood[THREAD], expo[THREAD*nDWIs .. +nDWIs-1].
//---------------------------------------------------------------------------
__device__ void calculateLoglikelihood(
    double *loglikelihood,
    double *expo,
    double *ZTheta,
    double *scaling,
    double *expScaling,
    double *EN,
    const unsigned int nDWIs,
    size_t const THREAD) {
    const size_t offset = THREAD * nDWIs;
    double total = 0.0;
    for (int idx = 0; idx < nDWIs; idx++) {
        const double scaledExp = exp(ZTheta[offset + idx] - scaling[THREAD]);
        expo[offset + idx] = scaledExp;
        total += EN[offset + idx] * ZTheta[offset + idx]
            - scaledExp * expScaling[THREAD];
    }
    loglikelihood[THREAD] = total;
}
//---------------------------------------------------------------------------
// initializeInformationMatrices: clear this thread's nDeltaParams x
// nDeltaParams Fisher-information matrix and its symmetrized copy to zero.
//---------------------------------------------------------------------------
__device__ void initializeInformationMatrices(
    double *fisherInformation,
    double *fisherInformation_sym,
    const unsigned int nDeltaParams,
    size_t const THREAD) {
    const unsigned int matrixSize = nDeltaParams * nDeltaParams;
    const size_t start = THREAD * matrixSize;
    for (int idx = 0; idx < matrixSize; idx++) {
        fisherInformation[start + idx] = 0.0;
        fisherInformation_sym[start + idx] = 0.0;
    }
}
//---------------------------------------------------------------------------
// iterateSigmaSQ: fixed-point (EM-style) iteration for the Rician noise
// variance SigmaSQ with theta held constant. Per iteration:
//   twotau[i] = b[i] / SigmaSQ
//   tmpdouble = sum_i twotau[i] * I1(twotau[i]) / I0(twotau[i])
//   SigmaSQ   = 0.5 * a / (nDWIs + tmpdouble)
// Stops when the iteration cap is reached or |SigmaSQ - SigmaSQ0| drops to
// toleranceSigmaSQ. Writes (all at this THREAD's slot/slice): SigmaSQ,
// SigmaSQ0, tmpdouble, twotau, nIterSigmaSQ, continueSigmaSQIteration.
//---------------------------------------------------------------------------
__device__ void iterateSigmaSQ(
    double *SigmaSQ,
    double *SigmaSQ0,
    double *tmpdouble,
    double *a,
    double *b,
    double *twotau,
    unsigned int *nIterSigmaSQ,
    unsigned int iterLimitSigmaSQ,
    const double toleranceSigmaSQ,
    const unsigned int nDWIs,
    bool *continueSigmaSQIteration,
    size_t const THREAD) {
    // Should be ok
    continueSigmaSQIteration[THREAD] = true;
    nIterSigmaSQ[THREAD] = 0;
    while (continueSigmaSQIteration[THREAD]) {
        (nIterSigmaSQ[THREAD])++;
        // Keep the previous estimate for the convergence test below.
        SigmaSQ0[THREAD] = SigmaSQ[THREAD];
        tmpdouble[THREAD] = 0.0;
        for (int i = 0; i < nDWIs; i++) {
            twotau[THREAD * nDWIs + i] = b[THREAD * nDWIs + i] / SigmaSQ[THREAD];
            // Accumulate the Bessel-ratio term of the EM update; the
            // exponential scalings of I1 and I0 cancel in the ratio.
            tmpdouble[THREAD] += twotau[THREAD * nDWIs + i] *
                getBesseli1(twotau[THREAD * nDWIs + i]) /
                getBesseli0(twotau[THREAD * nDWIs + i]);
        }
        SigmaSQ[THREAD] = 0.5 * a[THREAD] / ((double)(nDWIs) + tmpdouble[THREAD]);
        // Continue while under the cap AND the estimate is still moving.
        continueSigmaSQIteration[THREAD] =
            ((nIterSigmaSQ[THREAD] < iterLimitSigmaSQ)
            &&
            (fabs(SigmaSQ[THREAD] - SigmaSQ0[THREAD]) > toleranceSigmaSQ));
    }
}
//---------------------------------------------------------------------------
// iterateS0: fixed-point iteration for theta(1) (log S0) with theta(2:end)
// and SigmaSQ held constant. Per iteration:
//   theta(1)  = log( sum_i b[i] * I1(twotau[i]) / I0(twotau[i]) ) - a
//   twotau[i] = b[i] * exp(theta(1)) / SigmaSQ
// Stops at the iteration cap or when the relative change of theta(1) drops
// to toleranceS0. Writes: theta(1), theta1_old, twotau, nIterS0,
// continueS0Iteration (this THREAD's slot/slice).
//
// Fix: the original convergence test used the bare value
//   fabs((theta(1) - theta1_old) / theta1_old)
// as a boolean, so toleranceS0 was never applied and the loop effectively
// always ran to iterLimitS0. The relative change is now compared against
// toleranceS0, matching the pattern used by iterateSigmaSQ.
//---------------------------------------------------------------------------
__device__ void iterateS0(
    double *theta,
    double *theta1_old,
    double *SigmaSQ,
    double *a,
    double *b,
    double *twotau,
    unsigned int *nIterS0,
    unsigned int iterLimitS0,
    const double toleranceS0,
    const unsigned int nDWIs,
    const unsigned int nParams,
    bool *continueS0Iteration,
    size_t const THREAD) {
    continueS0Iteration[THREAD] = true;
    nIterS0[THREAD] = 0;
    while (continueS0Iteration[THREAD]) {
        nIterS0[THREAD]++;
        // Get initial theta(1) parameter
        theta1_old[THREAD] = theta[THREAD * nParams + 0];
        // Calculate new theta(1) parameter
        theta[THREAD * nParams + 0] = 0.0;
        for (int i = 0; i < nDWIs; i++) {
            theta[THREAD * nParams + 0] += (b[THREAD * nDWIs + i] *
                getBesseli1(twotau[THREAD * nDWIs + i]) /
                getBesseli0(twotau[THREAD * nDWIs + i]));
        }
        theta[THREAD * nParams + 0] = log(theta[THREAD * nParams + 0]) - a[THREAD];
        // Update twotau for the next iteration step
        for (int i = 0; i < nDWIs; i++) {
            twotau[THREAD * nDWIs + i] = b[THREAD * nDWIs + i] *
                exp(theta[THREAD * nParams + 0]) / SigmaSQ[THREAD];
        }
        // Continue while under the cap AND the relative change of theta(1)
        // still exceeds the requested tolerance.
        continueS0Iteration[THREAD] =
            ((nIterS0[THREAD] < iterLimitS0)
            &&
            (fabs((theta[THREAD * nParams + 0] - theta1_old[THREAD]) / theta1_old[THREAD]) > toleranceS0));
    }
}
//---------------------------------------------------------------------------
// calculateFisherInformation: score vector and (symmetrized, scaled) Fisher
// information for theta(2:end), accumulated over all DWIs:
//   score[j-1]  += 2 * Z[j][i] * (EN[i] - expo[i]*expScaling)
//   I[j-1][k-1] += 4 * Z[j][i] * Z[k][i] * expo[i]
// DeltaTheta is seeded with a copy of score; fisherInformation is finally
// overwritten by the scaled symmetric copy.
// NOTE(review): the "symmetrize" step adds element (j-1,k-1) to ITSELF
// rather than to its transpose (k-1,j-1). Because the accumulation above is
// symmetric in j and k the final result is the same in exact arithmetic,
// but the transpose was presumably intended -- confirm against the
// reference implementation before changing.
//---------------------------------------------------------------------------
__device__ void calculateFisherInformation(
    double *fisherInformation,
    double *fisherInformation_sym,
    double *Z,
    double *score,
    double *DeltaTheta,
    double *expo,
    double *EN,
    double *expScaling,
    const unsigned int nDWIs,
    const unsigned int nParams,
    const unsigned int nDeltaParams,
    size_t const THREAD) {
    for (int j = 1; j < nParams; j++) {
        score[THREAD * nDeltaParams + j - 1] = 0.0;
        for (int i = 0; i < nDWIs; i++) {
            score[THREAD * nDeltaParams + j - 1] +=
                2.0 * Z[j * nDWIs + i] * (EN[THREAD * nDWIs + i] -
                expo[THREAD * nDWIs + i] * expScaling[THREAD]);
            for (int k = 1; k < nParams; k++) { // range of j and k are [1 to nParams]
                fisherInformation[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)] +=
                    4.0 * Z[j * nDWIs + i] * Z[k * nDWIs + i] * expo[THREAD * nDWIs + i];
                // Symmetrize Fisher Information (see NOTE(review) above).
                fisherInformation_sym[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)] =
                    (fisherInformation[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)] +
                    fisherInformation[THREAD * nDeltaParams*nDeltaParams + (j - 1)*nDeltaParams + (k - 1)]) *
                    0.5 * expScaling[THREAD];
            }
        }
        // Seed the Newton step right-hand side with the score.
        DeltaTheta[THREAD * nDeltaParams + j - 1] = score[THREAD *nDeltaParams + j - 1];
    }
    // Make copy of symmetric Fisher information matrix
    for (int i = 0; i < nDeltaParams*nDeltaParams; i++) {
        fisherInformation[THREAD * nDeltaParams * nDeltaParams + i] = fisherInformation_sym[THREAD * nDeltaParams * nDeltaParams + i];
    }
}
//---------------------------------------------------------------------------
// iterateLoglikelihood: damped Newton inner loop for theta(2:end).
// Each pass solves (I_sym + lambda*Id) * DeltaTheta = score with a Cholesky
// factorization, proposes new_theta, recomputes ZTheta / scaling /
// loglikelihood, and increases lambda (by regulatorRescaling) until the new
// loglikelihood is finite and not worse than loglikelihood_old, or the
// iteration cap is hit.
// Parameters indx and toleranceLoglikelihood are currently unused; they
// belonged to the commented-out LU-decomposition path.
//---------------------------------------------------------------------------
__device__ void iterateLoglikelihood(
    int *indx,
    double *score,
    double *vv,
    double *DeltaTheta,
    double *Z,
    double *expo,
    double *theta,
    double *loglikelihood,
    double *loglikelihood_old,
    double *new_theta,
    double *regulatorLambda,
    double *fisherInformation,
    double *fisherInformation_sym,
    double *ZTheta,
    double *c,
    double *scaling,
    double *expScaling,
    double *EN,
    const unsigned int nDWIs,
    const unsigned int nParams,
    const unsigned int nDeltaParams,
    const double regulatorLambda0,
    const double regulatorRescaling,
    unsigned int *nIterLoglikelihood,
    const unsigned int iterLimitLoglikelihood,
    const double toleranceLoglikelihood,
    bool *continueLoglikelihoodIteration,
    size_t const THREAD) {
    nIterLoglikelihood[THREAD] = 0;
    continueLoglikelihoodIteration[THREAD] = true;
    regulatorLambda[THREAD] = regulatorLambda0;
    while (continueLoglikelihoodIteration[THREAD]) {
        nIterLoglikelihood[THREAD]++;
        //loglikelihood_old[THREAD] = loglikelihood[THREAD]; // loglikelihood_old is not supposed to be updated in this loop
        // Initialize DeltaTheta for LUdecomposition & substitutions
        // because X = I\score calculated using LUsubstitutions actually
        // replaces values in score and we don't want to loose that information
        // so we have to save score into DeltaTheta variable
        for (int j = 1; j < nParams; j++) {
            DeltaTheta[THREAD * nDeltaParams + j - 1] = score[THREAD *nDeltaParams + j - 1];
        }
        // Regularize Fisher information matrix with lambda
        // (Levenberg-style damping added to the diagonal only).
        for (int i = 0; i < nDeltaParams; i++) {
            fisherInformation[THREAD * nDeltaParams*nDeltaParams + i*nDeltaParams + i] =
                fisherInformation_sym[THREAD * nDeltaParams*nDeltaParams + i*nDeltaParams + i]
                + regulatorLambda[THREAD];
        }
        // Update regulatorLambda
        regulatorLambda[THREAD] *= regulatorRescaling;
        //LUdecomposition(fisherInformation, nDeltaParams, indx, vv, THREAD);
        //LUsubstitutions(fisherInformation, nDeltaParams, indx, DeltaTheta, THREAD);
        // Solve the damped normal equations for DeltaTheta.
        CholeskyDecomposition(fisherInformation, nDeltaParams, vv, THREAD);
        CholeskyBacksubstitution(fisherInformation, nDeltaParams, vv, score, DeltaTheta, THREAD);
        //goto THE_END_LOGLIKELIHOOD;
        // Calculate new theta(2:end)
        for (int i = 1; i < nParams; i++) {
            new_theta[THREAD * nDeltaParams + i - 1] =
                theta[THREAD * nParams + i]
                + DeltaTheta[THREAD * nDeltaParams + i - 1];
        }
        // Calculate ZTheta based on new_theta
        for (int i = 0; i < nDWIs; i++) {
            ZTheta[THREAD * nDWIs + i] = 0.0;
            for (int j = 1; j < nParams; j++) {
                ZTheta[THREAD * nDWIs + i] +=
                    Z[j* nDWIs + i] * new_theta[THREAD * nDeltaParams + j - 1];
            }
            ZTheta[THREAD * nDWIs + i] *= 2.0;
            ZTheta[THREAD * nDWIs + i] += c[THREAD]; // c is based on theta(1) and sigmasq that are constant in this loop
        }
        // Rescale by the max of ZTheta to keep the exponentials finite.
        scaling[THREAD] = getMax(ZTheta, nDWIs, THREAD);
        expScaling[THREAD] = exp(scaling[THREAD]);
        // Calculate new loglikelihood
        // calculateLoglikelihood updates loglikelihood and expo variables
        calculateLoglikelihood(loglikelihood, expo, ZTheta, scaling, expScaling, EN, nDWIs, THREAD);
        // Check if new loglikelihood is NaN, if so more regulation is needed
        // (f != f) is true only if f is NaN (IEEE standard)
        if (loglikelihood[THREAD] != loglikelihood[THREAD]) {
            // loglikelihood is NaN, check only iterations
            continueLoglikelihoodIteration[THREAD] = (nIterLoglikelihood[THREAD] < iterLimitLoglikelihood);
        }
        else {
            // Keep damping harder while the step still decreases the
            // loglikelihood relative to the accepted value.
            continueLoglikelihoodIteration[THREAD] =
                ((loglikelihood[THREAD] < loglikelihood_old[THREAD])
                &&
                (nIterLoglikelihood[THREAD] < iterLimitLoglikelihood));
        }
    }
    //THE_END_LOGLIKELIHOOD:
}
//---------------------------------------------------------------------------
// iterateTheta: outer Newton-type iteration for theta(2:end) with theta(1)
// and SigmaSQ fixed. Each pass rebuilds the score and Fisher information,
// runs the damped inner loop iterateLoglikelihood, and accepts the proposed
// new_theta unless the new loglikelihood is NaN.
// NOTE(review): loglikelihood_old is refreshed right BEFORE the convergence
// test, so the (loglikelihood - loglikelihood_old) term is always 0 and the
// test reduces to DeltaThetaScore > toleranceTheta. Presumably the update
// was meant to come after the test -- confirm against the reference.
//---------------------------------------------------------------------------
__device__ void iterateTheta(
    int *indx,
    double *vv,
    double *theta,
    double *ZTheta,
    double *c,
    double *fisherInformation,
    double *fisherInformation_sym,
    double *score,
    double *Z,
    double *EN,
    double *scaling,
    double *expScaling,
    double *expo,
    double *DeltaTheta,
    double *DeltaThetaScore,
    double *new_theta,
    double *loglikelihood,
    double *loglikelihood_old,
    double *regulatorLambda,
    const double regulatorLambda0,
    const double regulatorRescaling,
    const unsigned int nDWIs,
    const unsigned int nParams,
    const unsigned int nDeltaParams,
    unsigned int *nIterTheta,
    unsigned int *nIterLoglikelihood,
    const unsigned int iterLimitTheta,
    const unsigned int iterLimitLoglikelihood,
    const double toleranceTheta,
    const double toleranceLoglikelihood,
    bool *continueThetaIteration,
    bool *continueLoglikelihoodIteration,
    size_t const THREAD) {
    // Now indexing for i ranges [0, nDWIs-1] and j ranges [1, nParams] since first nParams is the theta(1)
    continueThetaIteration[THREAD] = true;
    nIterTheta[THREAD] = 0;
    loglikelihood_old[THREAD] = loglikelihood[THREAD];
    while (continueThetaIteration[THREAD]) {
        nIterTheta[THREAD]++;
        // Rebuild score vector and Fisher information at the current theta.
        calculateFisherInformation(fisherInformation, fisherInformation_sym, Z, score, DeltaTheta, expo, EN, expScaling, nDWIs, nParams, nDeltaParams, THREAD);
        // Optimize loglikelihood
        iterateLoglikelihood(indx, score, vv, DeltaTheta, Z, expo, theta, loglikelihood, loglikelihood_old, new_theta, regulatorLambda, fisherInformation, fisherInformation_sym, ZTheta, c, scaling, expScaling, EN, nDWIs, nParams, nDeltaParams, regulatorLambda0, regulatorRescaling, nIterLoglikelihood, iterLimitLoglikelihood, toleranceLoglikelihood, continueLoglikelihoodIteration, THREAD);
        //goto THE_END_THETA;
        // Directional derivative DeltaTheta . score, used as the step-size
        // convergence measure below.
        DeltaThetaScore[THREAD] = 0.0;
        for (int i = 0; i < nDeltaParams; i++) {
            DeltaThetaScore[THREAD] += DeltaTheta[THREAD * nDeltaParams + i]
                * score[THREAD * nDeltaParams + i];
        }
        // Check if new loglikelihood is NaN, if not
        // update theta(2:end) and loglikelihood_old
        if (loglikelihood[THREAD] != loglikelihood[THREAD]) {
            // NaN, don't update variables
            continueThetaIteration[THREAD] = (nIterTheta[THREAD] < iterLimitTheta);
        } else {
            for (int i = 1; i < nParams; i++) {
                theta[THREAD * nParams + i] = new_theta[THREAD * nDeltaParams + i - 1];
            }
            loglikelihood_old[THREAD] = loglikelihood[THREAD];
            continueThetaIteration[THREAD] =
                (((DeltaThetaScore[THREAD] > toleranceTheta)
                ||
                ((loglikelihood[THREAD] - loglikelihood_old[THREAD]) > toleranceLoglikelihood))
                &&
                (nIterTheta[THREAD] < iterLimitTheta));
        }
    }
    //THE_END_THETA:
}
/*
 * calculateNorms
 * --------------
 * For the voxel in slot THREAD, computes
 *   norm1[THREAD] = ||theta_old||            (Euclidean norm of the previous
 *                                             parameter vector), and
 *   norm2[THREAD] = ||theta - theta_old||    (Euclidean norm of the update
 *                                             step),
 * over all nParams entries of the per-thread parameter block.
 * Results are written in place into norm1 and norm2.
 */
__device__ void calculateNorms(
    double *norm1,
    double *norm2,
    double *theta,
    double *theta_old,
    const unsigned int nParams,
    size_t const THREAD) {
    norm1[THREAD] = 0.0;
    norm2[THREAD] = 0.0;
    const size_t base = THREAD * nParams;  // start of this thread's parameter block
    for (int k = 0; k < nParams; k++) {
        const double previous = theta_old[base + k];
        const double step = theta[base + k] - previous;
        norm1[THREAD] += previous * previous;
        norm2[THREAD] += step * step;
    }
    norm1[THREAD] = sqrt(norm1[THREAD]);
    norm2[THREAD] = sqrt(norm2[THREAD]);
}
/*
 * RicianMLE — one GPU thread performs the full maximum-likelihood parameter
 * estimation for one voxel under a Rician noise model. The thread index maps
 * to the voxel index; all working state is carried in device arrays indexed
 * by THREAD so threads never share slots.
 *
 * High-level flow (per voxel):
 *   1. Estimate SigmaSQ from the initial theta.
 *   2. Loop: optimize theta(1) (S0), then theta(2:end) via Fisher scoring
 *      (only if the conditional expectation EN carries information), then
 *      re-estimate SigmaSQ; repeat until relative changes fall below the
 *      tolerances or iterLimitVoxel is hit.
 *
 * NOTE(review): nIterVoxel[THREAD] is incremented but never reset here —
 * presumably zero-initialized by the host before launch; confirm.
 */
__global__ void RicianMLE(
    double *theta,
    double *SigmaSQ,
    double *Z,
    double *fisherInformation,
    double *fisherInformation_sym,
    double *score,
    double *DeltaTheta,
    double *new_theta,
    double *vv,
    int *indx,
    double *theta_old,
    double *Y,
    double *expZTheta,
    double *ZTheta,
    double *twotau,
    double *expo,
    double *EN,
    double *b,
    double *a,
    double *c,
    double *sumYSQ,
    double *theta1_old,
    double *SigmaSQ0,
    double *SigmaSQ_old,
    double *tmpdouble,
    double *scaling,
    double *expScaling,
    double *loglikelihood,
    double *loglikelihood_old,
    double *regulatorLambda,
    double *DeltaThetaScore,
    double *norm1,
    double *norm2,
    unsigned int *nIterSigmaSQ,
    unsigned int *nIterVoxel,
    unsigned int *nIterS0,
    unsigned int *nIterTheta,
    unsigned int *nIterLoglikelihood,
    bool *continueSigmaSQIteration,
    bool *continueVoxelIteration,
    bool *continueS0Iteration,
    bool *continueThetaIteration,
    bool *continueLoglikelihoodIteration,
    bool *anyEN,
    const double toleranceSigmaSQ,
    const double toleranceS0,
    const double toleranceTheta,
    const double toleranceLoglikelihood,
    const unsigned int iterLimitSigmaSQ,
    const unsigned int iterLimitVoxel,
    const unsigned int iterLimitS0,
    const unsigned int iterLimitTheta,
    const unsigned int iterLimitLoglikelihood,
    const double regulatorLambda0,
    const double regulatorRescaling,
    const unsigned int nDWIs,
    const unsigned int nParams,
    const unsigned int nDeltaParams,
    const unsigned int nVoxels) {
    // Initial, work out which THREAD i.e. voxel we are computing
    size_t const THREAD = calculateGlobalIndex();
    if (THREAD >= nVoxels) {
        return;  // thread beyond the last voxel: nothing to do
    }
    // First, optimize Rician loglikelihood w.r.t. SigmaSQ
    calculateExpZTheta( expZTheta, theta, Z, nParams, nDWIs, THREAD);
    calculateAB_1(a, b, Y, expZTheta, sumYSQ, nDWIs, THREAD);
    iterateSigmaSQ(SigmaSQ, SigmaSQ0, tmpdouble, a, b, twotau, nIterSigmaSQ, iterLimitSigmaSQ, toleranceSigmaSQ, nDWIs, continueSigmaSQIteration, THREAD);
    // Start voxel-wise optimization
    continueVoxelIteration[THREAD] = true;
    while (continueVoxelIteration[THREAD]) {
        nIterVoxel[THREAD]++;
        // Save initial theta and SigmaSQ to be used later to test if voxel optimization continues
        SigmaSQ_old[THREAD] = SigmaSQ[THREAD];
        for (int i = 0; i < nParams; i++) {
            theta_old[THREAD * nParams + i] = theta[THREAD * nParams + i];
        }
        // Second, optimize w.r.t. S0 i.e. theta(1) with fixed theta(2:end) and SigmaSQ
        // calcuateAB_2 updates a,b, expZTheta, and twotau variables
        calculateAB_2(a, b, Y, Z, theta, SigmaSQ, expZTheta, twotau, nDWIs, nParams, THREAD);
        // iterateS0 updates theta(1) and twotau variables
        iterateS0(theta, theta1_old, SigmaSQ, a, b, twotau, nIterS0, iterLimitS0, toleranceS0, nDWIs, nParams, continueS0Iteration, THREAD);
        // Third, optimize w.r.t. theta(2:end) with fixed theta(1) and SigmaSQ
        // calculateEN updates conditional expectation EN and checks if any(EN > 0)
        calculateEN(EN, twotau, nDWIs, anyEN, THREAD);
        if (anyEN[THREAD]) {
            // There is information to estimate tensor(s)
            // calculateZTheta updates c and ZTheta variables
            calculateZTheta(c, ZTheta, theta, SigmaSQ, Z, nDWIs, nParams, THREAD);
            // Scaling factor keeps the subsequent exponentials numerically stable
            scaling[THREAD] = getMax(ZTheta, nDWIs, THREAD);
            expScaling[THREAD] = exp(scaling[THREAD]);
            // calculateLoglikelihood updates loglikelihood and expo variables
            calculateLoglikelihood(loglikelihood, expo, ZTheta, scaling, expScaling, EN, nDWIs, THREAD);
            initializeInformationMatrices(fisherInformation, fisherInformation_sym, nDeltaParams, THREAD);
            iterateTheta(indx, vv, theta, ZTheta, c, fisherInformation, fisherInformation_sym, score, Z, EN, scaling, expScaling, expo, DeltaTheta, DeltaThetaScore, new_theta, loglikelihood, loglikelihood_old, regulatorLambda, regulatorLambda0, regulatorRescaling, nDWIs, nParams, nDeltaParams, nIterTheta, nIterLoglikelihood, iterLimitTheta, iterLimitLoglikelihood, toleranceTheta, toleranceLoglikelihood, continueThetaIteration, continueLoglikelihoodIteration, THREAD);
            //goto THE_END;
        }
        else {
            // There is no information for estimations
            // Set theta(2:end) and information to zero
            for (int i = 1; i < nParams; i++) {
                theta[THREAD * nParams + i] = 0.0;
            }
            initializeInformationMatrices(fisherInformation, fisherInformation_sym, nDeltaParams, THREAD);
        }
        // Last, optimize w.r.t. SigmaSQ with fixed theta
        calculateExpZTheta(expZTheta, theta, Z, nParams, nDWIs, THREAD);
        calculateAB_1(a, b, Y, expZTheta, sumYSQ, nDWIs, THREAD);
        iterateSigmaSQ(SigmaSQ, SigmaSQ0, tmpdouble, a, b, twotau, nIterSigmaSQ, iterLimitSigmaSQ, toleranceSigmaSQ, nDWIs, continueSigmaSQIteration, THREAD);
        calculateNorms(norm1, norm2, theta, theta_old, nParams, THREAD);
        // Continue while either SigmaSQ or theta changed relatively more than
        // its tolerance, and the voxel iteration limit is not exhausted.
        continueVoxelIteration[THREAD] =
            (((fabs((SigmaSQ[THREAD] - SigmaSQ_old[THREAD]) / SigmaSQ_old[THREAD]) > toleranceSigmaSQ)
              ||
              ((norm2[THREAD] / norm1[THREAD]) > toleranceTheta))
             &&
             (nIterVoxel[THREAD] < iterLimitVoxel));
    }
    //THE_END:
} | .text
.file "RicianMLE_double.hip"
# Host-side launch stub for the RicianMLE kernel (HIP, x86-64 SysV ABI).
# Spills every kernel argument to a local frame, builds the kernel-argument
# pointer table, pops the <<<grid, block, shmem, stream>>> configuration
# pushed by the caller, and forwards everything to hipLaunchKernel.
.globl _Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj # -- Begin function _Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.p2align 4, 0x90
.type _Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj,@function
_Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj: # @_Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.cfi_startproc
# %bb.0:
# Reserve the local frame: register-argument home slots plus the 59-entry
# kernel-argument pointer table. Stack-passed arguments sit above the frame
# at 624(%rsp) and beyond.
subq $616, %rsp # imm = 0x268
.cfi_def_cfa_offset 624
# Spill the six integer/pointer register arguments (SysV: rdi,rsi,rdx,rcx,r8,r9).
movq %rdi, 136(%rsp)
movq %rsi, 128(%rsp)
movq %rdx, 120(%rsp)
movq %rcx, 112(%rsp)
movq %r8, 104(%rsp)
movq %r9, 96(%rsp)
# Spill the six double register arguments (xmm0-xmm5).
movsd %xmm0, 88(%rsp)
movsd %xmm1, 80(%rsp)
movsd %xmm2, 72(%rsp)
movsd %xmm3, 64(%rsp)
movsd %xmm4, 56(%rsp)
movsd %xmm5, 48(%rsp)
# Build the kernel-argument pointer table starting at 144(%rsp):
# one slot per kernel parameter, each pointing at the spilled value
# (or directly at the caller's stack slot for stack-passed arguments).
leaq 136(%rsp), %rax
movq %rax, 144(%rsp)
leaq 128(%rsp), %rax
movq %rax, 152(%rsp)
leaq 120(%rsp), %rax
movq %rax, 160(%rsp)
leaq 112(%rsp), %rax
movq %rax, 168(%rsp)
leaq 104(%rsp), %rax
movq %rax, 176(%rsp)
leaq 96(%rsp), %rax
movq %rax, 184(%rsp)
leaq 624(%rsp), %rax
movq %rax, 192(%rsp)
leaq 632(%rsp), %rax
movq %rax, 200(%rsp)
leaq 640(%rsp), %rax
movq %rax, 208(%rsp)
leaq 648(%rsp), %rax
movq %rax, 216(%rsp)
leaq 656(%rsp), %rax
movq %rax, 224(%rsp)
leaq 664(%rsp), %rax
movq %rax, 232(%rsp)
leaq 672(%rsp), %rax
movq %rax, 240(%rsp)
leaq 680(%rsp), %rax
movq %rax, 248(%rsp)
leaq 688(%rsp), %rax
movq %rax, 256(%rsp)
leaq 696(%rsp), %rax
movq %rax, 264(%rsp)
leaq 704(%rsp), %rax
movq %rax, 272(%rsp)
leaq 712(%rsp), %rax
movq %rax, 280(%rsp)
leaq 720(%rsp), %rax
movq %rax, 288(%rsp)
leaq 728(%rsp), %rax
movq %rax, 296(%rsp)
leaq 736(%rsp), %rax
movq %rax, 304(%rsp)
leaq 744(%rsp), %rax
movq %rax, 312(%rsp)
leaq 752(%rsp), %rax
movq %rax, 320(%rsp)
leaq 760(%rsp), %rax
movq %rax, 328(%rsp)
leaq 768(%rsp), %rax
movq %rax, 336(%rsp)
leaq 776(%rsp), %rax
movq %rax, 344(%rsp)
leaq 784(%rsp), %rax
movq %rax, 352(%rsp)
leaq 792(%rsp), %rax
movq %rax, 360(%rsp)
leaq 800(%rsp), %rax
movq %rax, 368(%rsp)
leaq 808(%rsp), %rax
movq %rax, 376(%rsp)
leaq 816(%rsp), %rax
movq %rax, 384(%rsp)
leaq 824(%rsp), %rax
movq %rax, 392(%rsp)
leaq 832(%rsp), %rax
movq %rax, 400(%rsp)
leaq 840(%rsp), %rax
movq %rax, 408(%rsp)
leaq 848(%rsp), %rax
movq %rax, 416(%rsp)
leaq 856(%rsp), %rax
movq %rax, 424(%rsp)
leaq 864(%rsp), %rax
movq %rax, 432(%rsp)
leaq 872(%rsp), %rax
movq %rax, 440(%rsp)
leaq 880(%rsp), %rax
movq %rax, 448(%rsp)
leaq 888(%rsp), %rax
movq %rax, 456(%rsp)
leaq 896(%rsp), %rax
movq %rax, 464(%rsp)
leaq 904(%rsp), %rax
movq %rax, 472(%rsp)
leaq 912(%rsp), %rax
movq %rax, 480(%rsp)
leaq 920(%rsp), %rax
movq %rax, 488(%rsp)
leaq 88(%rsp), %rax
movq %rax, 496(%rsp)
leaq 80(%rsp), %rax
movq %rax, 504(%rsp)
leaq 72(%rsp), %rax
movq %rax, 512(%rsp)
leaq 64(%rsp), %rax
movq %rax, 520(%rsp)
leaq 928(%rsp), %rax
movq %rax, 528(%rsp)
leaq 936(%rsp), %rax
movq %rax, 536(%rsp)
leaq 944(%rsp), %rax
movq %rax, 544(%rsp)
leaq 952(%rsp), %rax
movq %rax, 552(%rsp)
leaq 960(%rsp), %rax
movq %rax, 560(%rsp)
leaq 56(%rsp), %rax
movq %rax, 568(%rsp)
leaq 48(%rsp), %rax
movq %rax, 576(%rsp)
leaq 968(%rsp), %rax
movq %rax, 584(%rsp)
leaq 976(%rsp), %rax
movq %rax, 592(%rsp)
leaq 984(%rsp), %rax
movq %rax, 600(%rsp)
leaq 992(%rsp), %rax
movq %rax, 608(%rsp)
# Retrieve the launch configuration recorded by __hipPushCallConfiguration
# (grid dim, block dim, shared-mem bytes, stream).
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
# Marshal the configuration into the hipLaunchKernel argument registers;
# shared-mem size and stream are passed on the stack.
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $632, %rsp # imm = 0x278
.cfi_adjust_cfa_offset -632
retq
.Lfunc_end0:
.size _Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj, .Lfunc_end0-_Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.cfi_endproc
# -- End function
# Module constructor (run from .init_array): registers the embedded HIP fat
# binary once (guarded by __hip_gpubin_handle), registers the RicianMLE
# kernel with the runtime, and installs the matching destructor via atexit.
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
# Already registered? (handle is non-null after first call)
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
# Zero the four trailing (unused) pointer arguments of __hipRegisterFunction.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
# Schedule __hip_module_dtor at process exit (tail call into atexit).
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
# Module destructor (installed by the ctor via atexit): unregisters the fat
# binary if it was registered and clears the handle so the ctor/dtor pair is
# idempotent.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
# Kernel handle object: the host-side symbol named like the kernel holds a
# pointer to its launch stub; the runtime keys registration on this address.
.type _Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj,@object # @_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.section .rodata,"a",@progbits
.globl _Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.p2align 3, 0x0
_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj:
.quad _Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.size _Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj, 8
# NUL-terminated kernel name string passed to __hipRegisterFunction.
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj"
.size .L__unnamed_1, 125
# Fat-binary wrapper: magic 0x48495046 ("HIPF"), version, pointer to payload.
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
# Runtime handle returned by __hipRegisterFatBinary (null until registered).
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
# Run the module constructor at program startup.
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000e49f6_00000000-6_RicianMLE_double.cudafe1.cpp"
.text
#APP
#NO_APP
# Internal (file-local) helper: unregisters this translation unit's CUDA fat
# binary at process teardown using the handle saved during registration.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2050:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2050:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# Host-side trampolines for the translation unit's __device__-only functions.
# These symbols exist only so host links resolve; a __device__ function has
# no host implementation, so each stub stores 1 into a local, loads it into
# %edi, and calls exit(1) if it is ever invoked from host code. All 21 stubs
# below are identical except for the mangled name.
.globl _Z20calculateGlobalIndexv
.type _Z20calculateGlobalIndexv, @function
_Z20calculateGlobalIndexv:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z20calculateGlobalIndexv, .-_Z20calculateGlobalIndexv
.globl _Z11getBesseli0d
.type _Z11getBesseli0d, @function
_Z11getBesseli0d:
.LFB2028:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2028:
.size _Z11getBesseli0d, .-_Z11getBesseli0d
.globl _Z11getBesseli1d
.type _Z11getBesseli1d, @function
_Z11getBesseli1d:
.LFB2029:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2029:
.size _Z11getBesseli1d, .-_Z11getBesseli1d
.globl _Z6getMaxPdjm
.type _Z6getMaxPdjm, @function
_Z6getMaxPdjm:
.LFB2030:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2030:
.size _Z6getMaxPdjm, .-_Z6getMaxPdjm
.globl _Z15LUdecompositionPdiPiS_m
.type _Z15LUdecompositionPdiPiS_m, @function
_Z15LUdecompositionPdiPiS_m:
.LFB2031:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2031:
.size _Z15LUdecompositionPdiPiS_m, .-_Z15LUdecompositionPdiPiS_m
.globl _Z15LUsubstitutionsPdiPiS_m
.type _Z15LUsubstitutionsPdiPiS_m, @function
_Z15LUsubstitutionsPdiPiS_m:
.LFB2032:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2032:
.size _Z15LUsubstitutionsPdiPiS_m, .-_Z15LUsubstitutionsPdiPiS_m
.globl _Z21CholeskyDecompositionPdiS_m
.type _Z21CholeskyDecompositionPdiS_m, @function
_Z21CholeskyDecompositionPdiS_m:
.LFB2033:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2033:
.size _Z21CholeskyDecompositionPdiS_m, .-_Z21CholeskyDecompositionPdiS_m
.globl _Z24CholeskyBacksubstitutionPdiS_S_S_m
.type _Z24CholeskyBacksubstitutionPdiS_S_S_m, @function
_Z24CholeskyBacksubstitutionPdiS_S_S_m:
.LFB2034:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2034:
.size _Z24CholeskyBacksubstitutionPdiS_S_S_m, .-_Z24CholeskyBacksubstitutionPdiS_S_S_m
.globl _Z18calculateExpZThetaPdS_S_jjm
.type _Z18calculateExpZThetaPdS_S_jjm, @function
_Z18calculateExpZThetaPdS_S_jjm:
.LFB2035:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2035:
.size _Z18calculateExpZThetaPdS_S_jjm, .-_Z18calculateExpZThetaPdS_S_jjm
.globl _Z13calculateAB_1PdS_S_S_S_jm
.type _Z13calculateAB_1PdS_S_S_S_jm, @function
_Z13calculateAB_1PdS_S_S_S_jm:
.LFB2036:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2036:
.size _Z13calculateAB_1PdS_S_S_S_jm, .-_Z13calculateAB_1PdS_S_S_S_jm
.globl _Z13calculateAB_2PdS_S_S_S_S_S_S_jjm
.type _Z13calculateAB_2PdS_S_S_S_S_S_S_jjm, @function
_Z13calculateAB_2PdS_S_S_S_S_S_S_jjm:
.LFB2037:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2037:
.size _Z13calculateAB_2PdS_S_S_S_S_S_S_jjm, .-_Z13calculateAB_2PdS_S_S_S_S_S_S_jjm
.globl _Z11calculateENPdS_jPbm
.type _Z11calculateENPdS_jPbm, @function
_Z11calculateENPdS_jPbm:
.LFB2038:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2038:
.size _Z11calculateENPdS_jPbm, .-_Z11calculateENPdS_jPbm
.globl _Z15calculateZThetaPdS_S_S_S_jjm
.type _Z15calculateZThetaPdS_S_S_S_jjm, @function
_Z15calculateZThetaPdS_S_S_S_jjm:
.LFB2039:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2039:
.size _Z15calculateZThetaPdS_S_S_S_jjm, .-_Z15calculateZThetaPdS_S_S_S_jjm
.globl _Z22calculateLoglikelihoodPdS_S_S_S_S_jm
.type _Z22calculateLoglikelihoodPdS_S_S_S_S_jm, @function
_Z22calculateLoglikelihoodPdS_S_S_S_S_jm:
.LFB2040:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2040:
.size _Z22calculateLoglikelihoodPdS_S_S_S_S_jm, .-_Z22calculateLoglikelihoodPdS_S_S_S_S_jm
.globl _Z29initializeInformationMatricesPdS_jm
.type _Z29initializeInformationMatricesPdS_jm, @function
_Z29initializeInformationMatricesPdS_jm:
.LFB2041:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2041:
.size _Z29initializeInformationMatricesPdS_jm, .-_Z29initializeInformationMatricesPdS_jm
.globl _Z14iterateSigmaSQPdS_S_S_S_S_PjjdjPbm
.type _Z14iterateSigmaSQPdS_S_S_S_S_PjjdjPbm, @function
_Z14iterateSigmaSQPdS_S_S_S_S_PjjdjPbm:
.LFB2042:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2042:
.size _Z14iterateSigmaSQPdS_S_S_S_S_PjjdjPbm, .-_Z14iterateSigmaSQPdS_S_S_S_S_PjjdjPbm
.globl _Z9iterateS0PdS_S_S_S_S_PjjdjjPbm
.type _Z9iterateS0PdS_S_S_S_S_PjjdjjPbm, @function
_Z9iterateS0PdS_S_S_S_S_PjjdjjPbm:
.LFB2043:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2043:
.size _Z9iterateS0PdS_S_S_S_S_PjjdjjPbm, .-_Z9iterateS0PdS_S_S_S_S_PjjdjjPbm
.globl _Z26calculateFisherInformationPdS_S_S_S_S_S_S_jjjm
.type _Z26calculateFisherInformationPdS_S_S_S_S_S_S_jjjm, @function
_Z26calculateFisherInformationPdS_S_S_S_S_S_S_jjjm:
.LFB2044:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2044:
.size _Z26calculateFisherInformationPdS_S_S_S_S_S_S_jjjm, .-_Z26calculateFisherInformationPdS_S_S_S_S_S_S_jjjm
.globl _Z20iterateLoglikelihoodPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_jjjddPjjdPbm
.type _Z20iterateLoglikelihoodPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_jjjddPjjdPbm, @function
_Z20iterateLoglikelihoodPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_jjjddPjjdPbm:
.LFB2045:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2045:
.size _Z20iterateLoglikelihoodPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_jjjddPjjdPbm, .-_Z20iterateLoglikelihoodPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_jjjddPjjdPbm
.globl _Z12iterateThetaPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_ddjjjPjS1_jjddPbS2_m
.type _Z12iterateThetaPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_ddjjjPjS1_jjddPbS2_m, @function
_Z12iterateThetaPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_ddjjjPjS1_jjddPbS2_m:
.LFB2046:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2046:
.size _Z12iterateThetaPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_ddjjjPjS1_jjddPbS2_m, .-_Z12iterateThetaPiPdS0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_S0_ddjjjPjS1_jjddPbS2_m
.globl _Z14calculateNormsPdS_S_S_jm
.type _Z14calculateNormsPdS_S_S_jm, @function
_Z14calculateNormsPdS_S_S_jm:
.LFB2047:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2047:
.size _Z14calculateNormsPdS_S_S_jm, .-_Z14calculateNormsPdS_S_S_jm
# CUDA host-side launch stub for the RicianMLE kernel (x86-64 SysV, GCC
# nvcc-generated). Spills every argument to a stack-protected local frame,
# builds the 59-entry kernel-argument pointer array, pops the launch
# configuration, and calls cudaLaunchKernel.
.globl _Z138__device_stub__Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjjPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.type _Z138__device_stub__Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjjPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj, @function
_Z138__device_stub__Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjjPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj:
.LFB2072:
.cfi_startproc
endbr64
subq $952, %rsp
.cfi_def_cfa_offset 960
# Spill the six integer/pointer register arguments.
movq %rdi, 392(%rsp)
movq %rsi, 384(%rsp)
movq %rdx, 376(%rsp)
movq %rcx, 368(%rsp)
movq %r8, 360(%rsp)
movq %r9, 352(%rsp)
# Spill the six double register arguments.
movsd %xmm0, 40(%rsp)
movsd %xmm1, 32(%rsp)
movsd %xmm2, 24(%rsp)
movsd %xmm3, 16(%rsp)
movsd %xmm4, 8(%rsp)
movsd %xmm5, (%rsp)
# Copy the stack-passed arguments (at 960(%rsp) and above after the frame
# adjustment) into local slots so the pointer table can reference them.
movq 960(%rsp), %rax
movq %rax, 344(%rsp)
movq 968(%rsp), %rax
movq %rax, 336(%rsp)
movq 976(%rsp), %rax
movq %rax, 328(%rsp)
movq 984(%rsp), %rax
movq %rax, 320(%rsp)
movq 992(%rsp), %rax
movq %rax, 312(%rsp)
movq 1000(%rsp), %rax
movq %rax, 304(%rsp)
movq 1008(%rsp), %rax
movq %rax, 296(%rsp)
movq 1016(%rsp), %rax
movq %rax, 288(%rsp)
movq 1024(%rsp), %rax
movq %rax, 280(%rsp)
movq 1032(%rsp), %rax
movq %rax, 272(%rsp)
movq 1040(%rsp), %rax
movq %rax, 264(%rsp)
movq 1048(%rsp), %rax
movq %rax, 256(%rsp)
movq 1056(%rsp), %rax
movq %rax, 248(%rsp)
movq 1064(%rsp), %rax
movq %rax, 240(%rsp)
movq 1072(%rsp), %rax
movq %rax, 232(%rsp)
movq 1080(%rsp), %rax
movq %rax, 224(%rsp)
movq 1088(%rsp), %rax
movq %rax, 216(%rsp)
movq 1096(%rsp), %rax
movq %rax, 208(%rsp)
movq 1104(%rsp), %rax
movq %rax, 200(%rsp)
movq 1112(%rsp), %rax
movq %rax, 192(%rsp)
movq 1120(%rsp), %rax
movq %rax, 184(%rsp)
movq 1128(%rsp), %rax
movq %rax, 176(%rsp)
movq 1136(%rsp), %rax
movq %rax, 168(%rsp)
movq 1144(%rsp), %rax
movq %rax, 160(%rsp)
movq 1152(%rsp), %rax
movq %rax, 152(%rsp)
movq 1160(%rsp), %rax
movq %rax, 144(%rsp)
movq 1168(%rsp), %rax
movq %rax, 136(%rsp)
movq 1176(%rsp), %rax
movq %rax, 128(%rsp)
movq 1184(%rsp), %rax
movq %rax, 120(%rsp)
movq 1192(%rsp), %rax
movq %rax, 112(%rsp)
movq 1200(%rsp), %rax
movq %rax, 104(%rsp)
movq 1208(%rsp), %rax
movq %rax, 96(%rsp)
movq 1216(%rsp), %rax
movq %rax, 88(%rsp)
movq 1224(%rsp), %rax
movq %rax, 80(%rsp)
movq 1232(%rsp), %rax
movq %rax, 72(%rsp)
movq 1240(%rsp), %rax
movq %rax, 64(%rsp)
movq 1248(%rsp), %rax
movq %rax, 56(%rsp)
movq 1256(%rsp), %rax
movq %rax, 48(%rsp)
# Install the stack-protector canary (%fs:40) at the top of the frame.
movq %fs:40, %rax
movq %rax, 936(%rsp)
xorl %eax, %eax
# Build the kernel-argument pointer array starting at 464(%rsp):
# one slot per kernel parameter, pointing at the spilled copies above.
leaq 392(%rsp), %rax
movq %rax, 464(%rsp)
leaq 384(%rsp), %rax
movq %rax, 472(%rsp)
leaq 376(%rsp), %rax
movq %rax, 480(%rsp)
leaq 368(%rsp), %rax
movq %rax, 488(%rsp)
leaq 360(%rsp), %rax
movq %rax, 496(%rsp)
leaq 352(%rsp), %rax
movq %rax, 504(%rsp)
leaq 344(%rsp), %rax
movq %rax, 512(%rsp)
leaq 336(%rsp), %rax
movq %rax, 520(%rsp)
leaq 328(%rsp), %rax
movq %rax, 528(%rsp)
leaq 320(%rsp), %rax
movq %rax, 536(%rsp)
leaq 312(%rsp), %rax
movq %rax, 544(%rsp)
leaq 304(%rsp), %rax
movq %rax, 552(%rsp)
leaq 296(%rsp), %rax
movq %rax, 560(%rsp)
leaq 288(%rsp), %rax
movq %rax, 568(%rsp)
leaq 280(%rsp), %rax
movq %rax, 576(%rsp)
leaq 272(%rsp), %rax
movq %rax, 584(%rsp)
leaq 264(%rsp), %rax
movq %rax, 592(%rsp)
leaq 256(%rsp), %rax
movq %rax, 600(%rsp)
leaq 248(%rsp), %rax
movq %rax, 608(%rsp)
leaq 240(%rsp), %rax
movq %rax, 616(%rsp)
leaq 232(%rsp), %rax
movq %rax, 624(%rsp)
leaq 224(%rsp), %rax
movq %rax, 632(%rsp)
leaq 216(%rsp), %rax
movq %rax, 640(%rsp)
leaq 208(%rsp), %rax
movq %rax, 648(%rsp)
leaq 200(%rsp), %rax
movq %rax, 656(%rsp)
leaq 192(%rsp), %rax
movq %rax, 664(%rsp)
leaq 184(%rsp), %rax
movq %rax, 672(%rsp)
leaq 176(%rsp), %rax
movq %rax, 680(%rsp)
leaq 168(%rsp), %rax
movq %rax, 688(%rsp)
leaq 160(%rsp), %rax
movq %rax, 696(%rsp)
leaq 152(%rsp), %rax
movq %rax, 704(%rsp)
leaq 144(%rsp), %rax
movq %rax, 712(%rsp)
leaq 136(%rsp), %rax
movq %rax, 720(%rsp)
leaq 128(%rsp), %rax
movq %rax, 728(%rsp)
leaq 120(%rsp), %rax
movq %rax, 736(%rsp)
leaq 112(%rsp), %rax
movq %rax, 744(%rsp)
leaq 104(%rsp), %rax
movq %rax, 752(%rsp)
leaq 96(%rsp), %rax
movq %rax, 760(%rsp)
leaq 88(%rsp), %rax
movq %rax, 768(%rsp)
leaq 80(%rsp), %rax
movq %rax, 776(%rsp)
leaq 72(%rsp), %rax
movq %rax, 784(%rsp)
leaq 64(%rsp), %rax
movq %rax, 792(%rsp)
leaq 56(%rsp), %rax
movq %rax, 800(%rsp)
leaq 48(%rsp), %rax
movq %rax, 808(%rsp)
leaq 40(%rsp), %rax
movq %rax, 816(%rsp)
leaq 32(%rsp), %rax
movq %rax, 824(%rsp)
leaq 24(%rsp), %rax
movq %rax, 832(%rsp)
leaq 16(%rsp), %rax
movq %rax, 840(%rsp)
leaq 1264(%rsp), %rax
movq %rax, 848(%rsp)
leaq 1272(%rsp), %rax
movq %rax, 856(%rsp)
leaq 1280(%rsp), %rax
movq %rax, 864(%rsp)
leaq 1288(%rsp), %rax
movq %rax, 872(%rsp)
leaq 1296(%rsp), %rax
movq %rax, 880(%rsp)
leaq 8(%rsp), %rax
movq %rax, 888(%rsp)
movq %rsp, %rax
movq %rax, 896(%rsp)
leaq 1304(%rsp), %rax
movq %rax, 904(%rsp)
leaq 1312(%rsp), %rax
movq %rax, 912(%rsp)
leaq 1320(%rsp), %rax
movq %rax, 920(%rsp)
leaq 1328(%rsp), %rax
movq %rax, 928(%rsp)
# Default dim3 values (1,1,1) for grid and block before the pop call
# overwrites them with the real launch configuration.
movl $1, 416(%rsp)
movl $1, 420(%rsp)
movl $1, 424(%rsp)
movl $1, 428(%rsp)
movl $1, 432(%rsp)
movl $1, 436(%rsp)
leaq 408(%rsp), %rcx
leaq 400(%rsp), %rdx
leaq 428(%rsp), %rsi
leaq 416(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L49
.L45:
# Epilogue: verify the stack-protector canary before returning.
movq 936(%rsp), %rax
subq %fs:40, %rax
jne .L50
addq $952, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L49:
.cfi_restore_state
# Push stream and shared-mem size, then launch the kernel with the
# popped grid/block dimensions and the argument pointer array.
pushq 408(%rsp)
.cfi_def_cfa_offset 968
pushq 408(%rsp)
.cfi_def_cfa_offset 976
leaq 480(%rsp), %r9
movq 444(%rsp), %rcx
movl 452(%rsp), %r8d
movq 432(%rsp), %rsi
movl 440(%rsp), %edx
leaq _Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 960
jmp .L45
.L50:
# Canary mismatch: abort via the stack-smashing handler (does not return).
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2072:
.size _Z138__device_stub__Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjjPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj, .-_Z138__device_stub__Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjjPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.globl _Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.type _Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj, @function
_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj:
.LFB2073:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 392(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 392(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 392(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
movl 392(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 56
movl 392(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 64
movl 392(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 72
movl 392(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 80
movl 392(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 88
movl 392(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 96
pushq 392(%rsp)
.cfi_def_cfa_offset 104
pushq 392(%rsp)
.cfi_def_cfa_offset 112
pushq 392(%rsp)
.cfi_def_cfa_offset 120
pushq 392(%rsp)
.cfi_def_cfa_offset 128
pushq 392(%rsp)
.cfi_def_cfa_offset 136
pushq 392(%rsp)
.cfi_def_cfa_offset 144
pushq 392(%rsp)
.cfi_def_cfa_offset 152
pushq 392(%rsp)
.cfi_def_cfa_offset 160
pushq 392(%rsp)
.cfi_def_cfa_offset 168
pushq 392(%rsp)
.cfi_def_cfa_offset 176
pushq 392(%rsp)
.cfi_def_cfa_offset 184
pushq 392(%rsp)
.cfi_def_cfa_offset 192
pushq 392(%rsp)
.cfi_def_cfa_offset 200
pushq 392(%rsp)
.cfi_def_cfa_offset 208
pushq 392(%rsp)
.cfi_def_cfa_offset 216
pushq 392(%rsp)
.cfi_def_cfa_offset 224
pushq 392(%rsp)
.cfi_def_cfa_offset 232
pushq 392(%rsp)
.cfi_def_cfa_offset 240
pushq 392(%rsp)
.cfi_def_cfa_offset 248
pushq 392(%rsp)
.cfi_def_cfa_offset 256
pushq 392(%rsp)
.cfi_def_cfa_offset 264
pushq 392(%rsp)
.cfi_def_cfa_offset 272
pushq 392(%rsp)
.cfi_def_cfa_offset 280
pushq 392(%rsp)
.cfi_def_cfa_offset 288
pushq 392(%rsp)
.cfi_def_cfa_offset 296
pushq 392(%rsp)
.cfi_def_cfa_offset 304
pushq 392(%rsp)
.cfi_def_cfa_offset 312
pushq 392(%rsp)
.cfi_def_cfa_offset 320
pushq 392(%rsp)
.cfi_def_cfa_offset 328
pushq 392(%rsp)
.cfi_def_cfa_offset 336
pushq 392(%rsp)
.cfi_def_cfa_offset 344
pushq 392(%rsp)
.cfi_def_cfa_offset 352
pushq 392(%rsp)
.cfi_def_cfa_offset 360
pushq 392(%rsp)
.cfi_def_cfa_offset 368
pushq 392(%rsp)
.cfi_def_cfa_offset 376
pushq 392(%rsp)
.cfi_def_cfa_offset 384
pushq 392(%rsp)
.cfi_def_cfa_offset 392
pushq 392(%rsp)
.cfi_def_cfa_offset 400
call _Z138__device_stub__Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjjPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
addq $392, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2073:
.size _Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj, .-_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2075:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2075:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "RicianMLE_double.hip"
.globl _Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj # -- Begin function _Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.p2align 4, 0x90
.type _Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj,@function
_Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj: # @_Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.cfi_startproc
# %bb.0:
subq $616, %rsp # imm = 0x268
.cfi_def_cfa_offset 624
movq %rdi, 136(%rsp)
movq %rsi, 128(%rsp)
movq %rdx, 120(%rsp)
movq %rcx, 112(%rsp)
movq %r8, 104(%rsp)
movq %r9, 96(%rsp)
movsd %xmm0, 88(%rsp)
movsd %xmm1, 80(%rsp)
movsd %xmm2, 72(%rsp)
movsd %xmm3, 64(%rsp)
movsd %xmm4, 56(%rsp)
movsd %xmm5, 48(%rsp)
leaq 136(%rsp), %rax
movq %rax, 144(%rsp)
leaq 128(%rsp), %rax
movq %rax, 152(%rsp)
leaq 120(%rsp), %rax
movq %rax, 160(%rsp)
leaq 112(%rsp), %rax
movq %rax, 168(%rsp)
leaq 104(%rsp), %rax
movq %rax, 176(%rsp)
leaq 96(%rsp), %rax
movq %rax, 184(%rsp)
leaq 624(%rsp), %rax
movq %rax, 192(%rsp)
leaq 632(%rsp), %rax
movq %rax, 200(%rsp)
leaq 640(%rsp), %rax
movq %rax, 208(%rsp)
leaq 648(%rsp), %rax
movq %rax, 216(%rsp)
leaq 656(%rsp), %rax
movq %rax, 224(%rsp)
leaq 664(%rsp), %rax
movq %rax, 232(%rsp)
leaq 672(%rsp), %rax
movq %rax, 240(%rsp)
leaq 680(%rsp), %rax
movq %rax, 248(%rsp)
leaq 688(%rsp), %rax
movq %rax, 256(%rsp)
leaq 696(%rsp), %rax
movq %rax, 264(%rsp)
leaq 704(%rsp), %rax
movq %rax, 272(%rsp)
leaq 712(%rsp), %rax
movq %rax, 280(%rsp)
leaq 720(%rsp), %rax
movq %rax, 288(%rsp)
leaq 728(%rsp), %rax
movq %rax, 296(%rsp)
leaq 736(%rsp), %rax
movq %rax, 304(%rsp)
leaq 744(%rsp), %rax
movq %rax, 312(%rsp)
leaq 752(%rsp), %rax
movq %rax, 320(%rsp)
leaq 760(%rsp), %rax
movq %rax, 328(%rsp)
leaq 768(%rsp), %rax
movq %rax, 336(%rsp)
leaq 776(%rsp), %rax
movq %rax, 344(%rsp)
leaq 784(%rsp), %rax
movq %rax, 352(%rsp)
leaq 792(%rsp), %rax
movq %rax, 360(%rsp)
leaq 800(%rsp), %rax
movq %rax, 368(%rsp)
leaq 808(%rsp), %rax
movq %rax, 376(%rsp)
leaq 816(%rsp), %rax
movq %rax, 384(%rsp)
leaq 824(%rsp), %rax
movq %rax, 392(%rsp)
leaq 832(%rsp), %rax
movq %rax, 400(%rsp)
leaq 840(%rsp), %rax
movq %rax, 408(%rsp)
leaq 848(%rsp), %rax
movq %rax, 416(%rsp)
leaq 856(%rsp), %rax
movq %rax, 424(%rsp)
leaq 864(%rsp), %rax
movq %rax, 432(%rsp)
leaq 872(%rsp), %rax
movq %rax, 440(%rsp)
leaq 880(%rsp), %rax
movq %rax, 448(%rsp)
leaq 888(%rsp), %rax
movq %rax, 456(%rsp)
leaq 896(%rsp), %rax
movq %rax, 464(%rsp)
leaq 904(%rsp), %rax
movq %rax, 472(%rsp)
leaq 912(%rsp), %rax
movq %rax, 480(%rsp)
leaq 920(%rsp), %rax
movq %rax, 488(%rsp)
leaq 88(%rsp), %rax
movq %rax, 496(%rsp)
leaq 80(%rsp), %rax
movq %rax, 504(%rsp)
leaq 72(%rsp), %rax
movq %rax, 512(%rsp)
leaq 64(%rsp), %rax
movq %rax, 520(%rsp)
leaq 928(%rsp), %rax
movq %rax, 528(%rsp)
leaq 936(%rsp), %rax
movq %rax, 536(%rsp)
leaq 944(%rsp), %rax
movq %rax, 544(%rsp)
leaq 952(%rsp), %rax
movq %rax, 552(%rsp)
leaq 960(%rsp), %rax
movq %rax, 560(%rsp)
leaq 56(%rsp), %rax
movq %rax, 568(%rsp)
leaq 48(%rsp), %rax
movq %rax, 576(%rsp)
leaq 968(%rsp), %rax
movq %rax, 584(%rsp)
leaq 976(%rsp), %rax
movq %rax, 592(%rsp)
leaq 984(%rsp), %rax
movq %rax, 600(%rsp)
leaq 992(%rsp), %rax
movq %rax, 608(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $632, %rsp # imm = 0x278
.cfi_adjust_cfa_offset -632
retq
.Lfunc_end0:
.size _Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj, .Lfunc_end0-_Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj,@object # @_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.section .rodata,"a",@progbits
.globl _Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.p2align 3, 0x0
_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj:
.quad _Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.size _Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj"
.size .L__unnamed_1, 125
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9RicianMLEPdS_S_S_S_S_S_S_S_PiS_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_S_PjS1_S1_S1_S1_PbS2_S2_S2_S2_S2_ddddjjjjjddjjjj
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda.h>
#include "cuda_runtime.h"
// #include <cutil.h>
#include "texture_fetch_functions.h"
#include "device_functions.h"
#include "device_launch_parameters.h"
#include <cuda_profiler_api.h>
#include <stdio.h>
#include <iostream>
using namespace std;
#define DATATYPE int
#define ARRAYLEN 128 * 1024 * 1024
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line)
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
std::cerr << std::endl
<< " CUDA error " << file
<< "(" << line << ")"
<< " : " << errorMessage
<< " -> " << cudaGetErrorString(err) << "(" << (int)err
<< ") " << std::endl
<< std::endl;
cudaDeviceReset();
std::exit(EXIT_FAILURE);
}
}
#define __CUDA_ERROR(msg) \
{ \
cudaDeviceSynchronize(); \
__getLastCudaError(msg, __FILE__, __LINE__); \
}
#define size 100
__constant__ int *prt[size];
__global__ void matrix_add()
{
for (int i = threadIdx.x; i < size; i += blockDim.x)
{
*(prt[i]) = 2;
}
}
int main()
{
// int device;
// cudaGetDevice(&device);
cudaSetDevice(5);
int *p = (int *)malloc(sizeof(int) * size);
printf("host:%d,%d\n", sizeof(int *), sizeof(int));
// int *pd[size];
// for (int i = 0; i < size; i++)
// {
// cudaMalloc(&(pd[i]), sizeof(int) * 1);
// }
int *pd;
cudaMalloc(&pd, sizeof(int) * size);
int *pdarray[size];
for (int i = 0; i < size; i++)
{
pdarray[i] = pd + i;
}
cudaMemcpyToSymbol(prt, pdarray, sizeof(int *) * size);
matrix_add<<<1, 256>>>();
// __CUDA_ERROR("dfa");
cudaMemcpy(p, pd, sizeof(int) * size, cudaMemcpyDeviceToHost);
// cudaMemcpyFromSymbol(hd, data, sizeof(int) * 1024);
// for (int i = 0; i < size; i++)
// {
// cudaMemcpy(&p[i], pd[i], sizeof(int) * 1, cudaMemcpyDeviceToHost);
// }
for (int i = 0; i < size; i++)
{
cout << p[i] << " ";
}
} | code for sm_80
Function : _Z10matrix_addv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0020*/ ISETP.GT.AND P0, PT, R0, 0x63, PT ; /* 0x000000630000780c */
/* 0x001fda0003f04270 */
/*0030*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0040*/ IMAD.MOV.U32 R5, RZ, RZ, 0x2 ; /* 0x00000002ff057424 */
/* 0x000fe200078e00ff */
/*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0060*/ IMAD.SHL.U32 R2, R0.reuse, 0x8, RZ ; /* 0x0000000800027824 */
/* 0x041fe200078e00ff */
/*0070*/ IADD3 R0, R0, c[0x0][0x0], RZ ; /* 0x0000000000007a10 */
/* 0x000fc80007ffe0ff */
/*0080*/ ISETP.GE.AND P0, PT, R0, 0x64, PT ; /* 0x000000640000780c */
/* 0x000fe20003f06270 */
/*0090*/ LDC.64 R2, c[0x3][R2] ; /* 0x00c0000002027b82 */
/* 0x000e240000000a00 */
/*00a0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x0011f4000c101904 */
/*00b0*/ @!P0 BRA 0x60 ; /* 0xffffffa000008947 */
/* 0x000fea000383ffff */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda.h>
#include "cuda_runtime.h"
// #include <cutil.h>
#include "texture_fetch_functions.h"
#include "device_functions.h"
#include "device_launch_parameters.h"
#include <cuda_profiler_api.h>
#include <stdio.h>
#include <iostream>
using namespace std;
#define DATATYPE int
#define ARRAYLEN 128 * 1024 * 1024
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line)
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
std::cerr << std::endl
<< " CUDA error " << file
<< "(" << line << ")"
<< " : " << errorMessage
<< " -> " << cudaGetErrorString(err) << "(" << (int)err
<< ") " << std::endl
<< std::endl;
cudaDeviceReset();
std::exit(EXIT_FAILURE);
}
}
#define __CUDA_ERROR(msg) \
{ \
cudaDeviceSynchronize(); \
__getLastCudaError(msg, __FILE__, __LINE__); \
}
#define size 100
__constant__ int *prt[size];
__global__ void matrix_add()
{
for (int i = threadIdx.x; i < size; i += blockDim.x)
{
*(prt[i]) = 2;
}
}
int main()
{
// int device;
// cudaGetDevice(&device);
cudaSetDevice(5);
int *p = (int *)malloc(sizeof(int) * size);
printf("host:%d,%d\n", sizeof(int *), sizeof(int));
// int *pd[size];
// for (int i = 0; i < size; i++)
// {
// cudaMalloc(&(pd[i]), sizeof(int) * 1);
// }
int *pd;
cudaMalloc(&pd, sizeof(int) * size);
int *pdarray[size];
for (int i = 0; i < size; i++)
{
pdarray[i] = pd + i;
}
cudaMemcpyToSymbol(prt, pdarray, sizeof(int *) * size);
matrix_add<<<1, 256>>>();
// __CUDA_ERROR("dfa");
cudaMemcpy(p, pd, sizeof(int) * size, cudaMemcpyDeviceToHost);
// cudaMemcpyFromSymbol(hd, data, sizeof(int) * 1024);
// for (int i = 0; i < size; i++)
// {
// cudaMemcpy(&p[i], pd[i], sizeof(int) * 1, cudaMemcpyDeviceToHost);
// }
for (int i = 0; i < size; i++)
{
cout << p[i] << " ";
}
} | .file "tmpxft_0004157d_00000000-6_constantMemTest.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z29__device_stub__Z10matrix_addvv
.type _Z29__device_stub__Z10matrix_addvv, @function
_Z29__device_stub__Z10matrix_addvv:
.LFB3695:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z10matrix_addv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z29__device_stub__Z10matrix_addvv, .-_Z29__device_stub__Z10matrix_addvv
.globl _Z10matrix_addv
.type _Z10matrix_addv, @function
_Z10matrix_addv:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z10matrix_addvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _Z10matrix_addv, .-_Z10matrix_addv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "host:%d,%d\n"
.LC1:
.string " "
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $856, %rsp
.cfi_def_cfa_offset 896
movq %fs:40, %rax
movq %rax, 840(%rsp)
xorl %eax, %eax
movl $5, %edi
call cudaSetDevice@PLT
movl $400, %edi
call malloc@PLT
movq %rax, %rbp
movl $4, %ecx
movl $8, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rsp, %rdi
movl $400, %esi
call cudaMalloc@PLT
movq (%rsp), %rdx
leaq 32(%rsp), %rax
leaq 832(%rsp), %rcx
.L12:
movq %rdx, (%rax)
addq $4, %rdx
addq $8, %rax
cmpq %rcx, %rax
jne .L12
leaq 32(%rsp), %rsi
movl $1, %r8d
movl $0, %ecx
movl $800, %edx
leaq _ZL3prt(%rip), %rdi
call cudaMemcpyToSymbol@PLT
movl $256, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
movl $2, %ecx
movl $400, %edx
movq (%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movq %rbp, %rbx
addq $400, %rbp
leaq _ZSt4cout(%rip), %r13
leaq .LC1(%rip), %r12
.L14:
movl (%rbx), %esi
movq %r13, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
movq %r12, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L14
movq 840(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $856, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
call _Z29__device_stub__Z10matrix_addvv
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z10matrix_addv"
.LC3:
.string "prt"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3698:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z10matrix_addv(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $800, %r9d
movl $0, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _ZL3prt(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL3prt
.comm _ZL3prt,800,32
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda.h>
#include "cuda_runtime.h"
// #include <cutil.h>
#include "texture_fetch_functions.h"
#include "device_functions.h"
#include "device_launch_parameters.h"
#include <cuda_profiler_api.h>
#include <stdio.h>
#include <iostream>
using namespace std;
#define DATATYPE int
#define ARRAYLEN 128 * 1024 * 1024
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line)
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
std::cerr << std::endl
<< " CUDA error " << file
<< "(" << line << ")"
<< " : " << errorMessage
<< " -> " << cudaGetErrorString(err) << "(" << (int)err
<< ") " << std::endl
<< std::endl;
cudaDeviceReset();
std::exit(EXIT_FAILURE);
}
}
#define __CUDA_ERROR(msg) \
{ \
cudaDeviceSynchronize(); \
__getLastCudaError(msg, __FILE__, __LINE__); \
}
#define size 100
__constant__ int *prt[size];
__global__ void matrix_add()
{
for (int i = threadIdx.x; i < size; i += blockDim.x)
{
*(prt[i]) = 2;
}
}
int main()
{
// int device;
// cudaGetDevice(&device);
cudaSetDevice(5);
int *p = (int *)malloc(sizeof(int) * size);
printf("host:%d,%d\n", sizeof(int *), sizeof(int));
// int *pd[size];
// for (int i = 0; i < size; i++)
// {
// cudaMalloc(&(pd[i]), sizeof(int) * 1);
// }
int *pd;
cudaMalloc(&pd, sizeof(int) * size);
int *pdarray[size];
for (int i = 0; i < size; i++)
{
pdarray[i] = pd + i;
}
cudaMemcpyToSymbol(prt, pdarray, sizeof(int *) * size);
matrix_add<<<1, 256>>>();
// __CUDA_ERROR("dfa");
cudaMemcpy(p, pd, sizeof(int) * size, cudaMemcpyDeviceToHost);
// cudaMemcpyFromSymbol(hd, data, sizeof(int) * 1024);
// for (int i = 0; i < size; i++)
// {
// cudaMemcpy(&p[i], pd[i], sizeof(int) * 1, cudaMemcpyDeviceToHost);
// }
for (int i = 0; i < size; i++)
{
cout << p[i] << " ";
}
} | #include <hip/hip_runtime.h>
// #include <cutil.h>
#include "hip/device_functions.h"
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <iostream>
using namespace std;
#define DATATYPE int
#define ARRAYLEN 128 * 1024 * 1024
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
std::cerr << std::endl
<< " CUDA error " << file
<< "(" << line << ")"
<< " : " << errorMessage
<< " -> " << hipGetErrorString(err) << "(" << (int)err
<< ") " << std::endl
<< std::endl;
hipDeviceReset();
std::exit(EXIT_FAILURE);
}
}
#define __CUDA_ERROR(msg) \
{ \
hipDeviceSynchronize(); \
__getLastCudaError(msg, __FILE__, __LINE__); \
}
#define size 100
__constant__ int *prt[size];
__global__ void matrix_add()
{
for (int i = threadIdx.x; i < size; i += blockDim.x)
{
*(prt[i]) = 2;
}
}
int main()
{
// int device;
// cudaGetDevice(&device);
hipSetDevice(5);
int *p = (int *)malloc(sizeof(int) * size);
printf("host:%d,%d\n", sizeof(int *), sizeof(int));
// int *pd[size];
// for (int i = 0; i < size; i++)
// {
// cudaMalloc(&(pd[i]), sizeof(int) * 1);
// }
int *pd;
hipMalloc(&pd, sizeof(int) * size);
int *pdarray[size];
for (int i = 0; i < size; i++)
{
pdarray[i] = pd + i;
}
hipMemcpyToSymbol(HIP_SYMBOL(prt), pdarray, sizeof(int *) * size);
matrix_add<<<1, 256>>>();
// __CUDA_ERROR("dfa");
hipMemcpy(p, pd, sizeof(int) * size, hipMemcpyDeviceToHost);
// cudaMemcpyFromSymbol(hd, data, sizeof(int) * 1024);
// for (int i = 0; i < size; i++)
// {
// cudaMemcpy(&p[i], pd[i], sizeof(int) * 1, cudaMemcpyDeviceToHost);
// }
for (int i = 0; i < size; i++)
{
cout << p[i] << " ";
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
// #include <cutil.h>
#include "hip/device_functions.h"
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <iostream>
using namespace std;
#define DATATYPE int
#define ARRAYLEN 128 * 1024 * 1024
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
std::cerr << std::endl
<< " CUDA error " << file
<< "(" << line << ")"
<< " : " << errorMessage
<< " -> " << hipGetErrorString(err) << "(" << (int)err
<< ") " << std::endl
<< std::endl;
hipDeviceReset();
std::exit(EXIT_FAILURE);
}
}
#define __CUDA_ERROR(msg) \
{ \
hipDeviceSynchronize(); \
__getLastCudaError(msg, __FILE__, __LINE__); \
}
#define size 100
__constant__ int *prt[size];
__global__ void matrix_add()
{
for (int i = threadIdx.x; i < size; i += blockDim.x)
{
*(prt[i]) = 2;
}
}
int main()
{
// int device;
// cudaGetDevice(&device);
hipSetDevice(5);
int *p = (int *)malloc(sizeof(int) * size);
printf("host:%d,%d\n", sizeof(int *), sizeof(int));
// int *pd[size];
// for (int i = 0; i < size; i++)
// {
// cudaMalloc(&(pd[i]), sizeof(int) * 1);
// }
int *pd;
hipMalloc(&pd, sizeof(int) * size);
int *pdarray[size];
for (int i = 0; i < size; i++)
{
pdarray[i] = pd + i;
}
hipMemcpyToSymbol(HIP_SYMBOL(prt), pdarray, sizeof(int *) * size);
matrix_add<<<1, 256>>>();
// __CUDA_ERROR("dfa");
hipMemcpy(p, pd, sizeof(int) * size, hipMemcpyDeviceToHost);
// cudaMemcpyFromSymbol(hd, data, sizeof(int) * 1024);
// for (int i = 0; i < size; i++)
// {
// cudaMemcpy(&p[i], pd[i], sizeof(int) * 1, cudaMemcpyDeviceToHost);
// }
for (int i = 0; i < size; i++)
{
cout << p[i] << " ";
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10matrix_addv
.globl _Z10matrix_addv
.p2align 8
.type _Z10matrix_addv,@function
_Z10matrix_addv:
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e32 0x64, v0
s_cbranch_execz .LBB0_3
s_load_b32 s2, s[0:1], 0xc
v_lshlrev_b32_e32 v1, 3, v0
s_getpc_b64 s[0:1]
s_add_u32 s0, s0, prt@rel32@lo+4
s_addc_u32 s1, s1, prt@rel32@hi+12
v_mov_b32_e32 v3, 2
v_add_co_u32 v1, s0, v1, s0
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v2, null, 0, s1, s0
s_waitcnt lgkmcnt(0)
s_and_b32 s1, s2, 0xffff
s_mov_b32 s2, 0
s_lshl_b32 s3, s1, 3
s_mov_b32 s4, s2
.LBB0_2:
global_load_b64 v[4:5], v[1:2], off
v_add_nc_u32_e32 v0, s1, v0
v_add_co_u32 v1, s0, v1, s3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e64 v2, s0, s2, v2, s0
v_cmp_lt_u32_e32 vcc_lo, 0x63, v0
s_or_b32 s4, vcc_lo, s4
s_waitcnt vmcnt(0)
global_store_b32 v[4:5], v3, off
s_and_not1_b32 exec_lo, exec_lo, s4
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10matrix_addv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 256
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 5
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10matrix_addv, .Lfunc_end0-_Z10matrix_addv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected prt
.type prt,@object
.section .bss,"aw",@nobits
.globl prt
.p2align 4, 0x0
prt:
.zero 800
.size prt, 800
.type __hip_cuid_,@object
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym prt
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: hidden_block_count_x
- .offset: 4
.size: 4
.value_kind: hidden_block_count_y
- .offset: 8
.size: 4
.value_kind: hidden_block_count_z
- .offset: 12
.size: 2
.value_kind: hidden_group_size_x
- .offset: 14
.size: 2
.value_kind: hidden_group_size_y
- .offset: 16
.size: 2
.value_kind: hidden_group_size_z
- .offset: 18
.size: 2
.value_kind: hidden_remainder_x
- .offset: 20
.size: 2
.value_kind: hidden_remainder_y
- .offset: 22
.size: 2
.value_kind: hidden_remainder_z
- .offset: 40
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 64
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 256
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10matrix_addv
.private_segment_fixed_size: 0
.sgpr_count: 7
.sgpr_spill_count: 0
.symbol: _Z10matrix_addv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
// #include <cutil.h>
#include "hip/device_functions.h"
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <iostream>
using namespace std;
#define DATATYPE int
#define ARRAYLEN 128 * 1024 * 1024
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err)
{
std::cerr << std::endl
<< " CUDA error " << file
<< "(" << line << ")"
<< " : " << errorMessage
<< " -> " << hipGetErrorString(err) << "(" << (int)err
<< ") " << std::endl
<< std::endl;
hipDeviceReset();
std::exit(EXIT_FAILURE);
}
}
#define __CUDA_ERROR(msg) \
{ \
hipDeviceSynchronize(); \
__getLastCudaError(msg, __FILE__, __LINE__); \
}
#define size 100
__constant__ int *prt[size];
// Kernel: writes the constant 2 through each device pointer stored in the
// __constant__ array `prt` (size == 100 entries, filled by the host before
// launch). Threads stride by blockDim.x so any block size covers all entries.
__global__ void matrix_add()
{
for (int i = threadIdx.x; i < size; i += blockDim.x) // block-stride over prt[]
{
*(prt[i]) = 2; // dereference the i-th device pointer and store 2
}
}
int main()
{
// int device;
// cudaGetDevice(&device);
hipSetDevice(5);
int *p = (int *)malloc(sizeof(int) * size);
printf("host:%d,%d\n", sizeof(int *), sizeof(int));
// int *pd[size];
// for (int i = 0; i < size; i++)
// {
// cudaMalloc(&(pd[i]), sizeof(int) * 1);
// }
int *pd;
hipMalloc(&pd, sizeof(int) * size);
int *pdarray[size];
for (int i = 0; i < size; i++)
{
pdarray[i] = pd + i;
}
hipMemcpyToSymbol(HIP_SYMBOL(prt), pdarray, sizeof(int *) * size);
matrix_add<<<1, 256>>>();
// __CUDA_ERROR("dfa");
hipMemcpy(p, pd, sizeof(int) * size, hipMemcpyDeviceToHost);
// cudaMemcpyFromSymbol(hd, data, sizeof(int) * 1024);
// for (int i = 0; i < size; i++)
// {
// cudaMemcpy(&p[i], pd[i], sizeof(int) * 1, cudaMemcpyDeviceToHost);
// }
for (int i = 0; i < size; i++)
{
cout << p[i] << " ";
}
} | .text
.file "constantMemTest.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z25__device_stub__matrix_addv # -- Begin function _Z25__device_stub__matrix_addv
.p2align 4, 0x90
.type _Z25__device_stub__matrix_addv,@function
_Z25__device_stub__matrix_addv: # @_Z25__device_stub__matrix_addv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z10matrix_addv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z25__device_stub__matrix_addv, .Lfunc_end0-_Z25__device_stub__matrix_addv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $888, %rsp # imm = 0x378
.cfi_def_cfa_offset 912
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $5, %edi
callq hipSetDevice
movl $400, %edi # imm = 0x190
callq malloc
movq %rax, %rbx
xorl %r14d, %r14d
movl $.L.str, %edi
movl $8, %esi
movl $4, %edx
xorl %eax, %eax
callq printf
leaq 8(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hipMalloc
movq 8(%rsp), %rax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
leaq (%rax,%r14), %rcx
movq %rcx, 80(%rsp,%r14,2)
addq $4, %r14
cmpq $400, %r14 # imm = 0x190
jne .LBB1_1
# %bb.2:
leaq 80(%rsp), %rsi
movl $prt, %edi
movl $800, %edx # imm = 0x320
xorl %ecx, %ecx
movl $1, %r8d
callq hipMemcpyToSymbol
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 255(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z10matrix_addv, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq 8(%rsp), %rsi
movl $400, %edx # imm = 0x190
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movl (%rbx,%r14,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str.1, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r14
cmpq $100, %r14
jne .LBB1_5
# %bb.6:
xorl %eax, %eax
addq $888, %rsp # imm = 0x378
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10matrix_addv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $0, 8(%rsp)
movl $1, (%rsp)
movl $prt, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $800, %r9d # imm = 0x320
movq %rbx, %rdi
xorl %r8d, %r8d
callq __hipRegisterVar
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
# atexit handler (registered by __hip_module_ctor): unregisters the HIP fat
# binary exactly once; safe to run when nothing was registered.
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi # rdi = cached module handle
	testq	%rdi, %rdi                      # handle == 0 -> nothing to do
	je	.LBB3_2
# %bb.1:
	pushq	%rax                            # realign rsp to 16 before the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)   # clear handle: a second run is a no-op
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB3_2:
	retq
.Lfunc_end3:
	.size	__hip_module_dtor, .Lfunc_end3-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type prt,@object # @prt
.local prt
.comm prt,800,16
.type _Z10matrix_addv,@object # @_Z10matrix_addv
.section .rodata,"a",@progbits
.globl _Z10matrix_addv
.p2align 3, 0x0
_Z10matrix_addv:
.quad _Z25__device_stub__matrix_addv
.size _Z10matrix_addv, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "host:%d,%d\n"
.size .L.str, 12
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " "
.size .L.str.1, 2
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10matrix_addv"
.size .L__unnamed_1, 16
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "prt"
.size .L__unnamed_2, 4
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__matrix_addv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym prt
.addrsig_sym _Z10matrix_addv
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z10matrix_addv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0020*/ ISETP.GT.AND P0, PT, R0, 0x63, PT ; /* 0x000000630000780c */
/* 0x001fda0003f04270 */
/*0030*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0040*/ IMAD.MOV.U32 R5, RZ, RZ, 0x2 ; /* 0x00000002ff057424 */
/* 0x000fe200078e00ff */
/*0050*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0060*/ IMAD.SHL.U32 R2, R0.reuse, 0x8, RZ ; /* 0x0000000800027824 */
/* 0x041fe200078e00ff */
/*0070*/ IADD3 R0, R0, c[0x0][0x0], RZ ; /* 0x0000000000007a10 */
/* 0x000fc80007ffe0ff */
/*0080*/ ISETP.GE.AND P0, PT, R0, 0x64, PT ; /* 0x000000640000780c */
/* 0x000fe20003f06270 */
/*0090*/ LDC.64 R2, c[0x3][R2] ; /* 0x00c0000002027b82 */
/* 0x000e240000000a00 */
/*00a0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x0011f4000c101904 */
/*00b0*/ @!P0 BRA 0x60 ; /* 0xffffffa000008947 */
/* 0x000fea000383ffff */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
	.protected	_Z10matrix_addv         ; matrix_add() device code (gfx1100, wave32)
	.globl	_Z10matrix_addv
	.p2align	8
	.type	_Z10matrix_addv,@function
_Z10matrix_addv:
; Source: for (i = threadIdx.x; i < 100; i += blockDim.x) *(prt[i]) = 2;
; Entry: v0 = workitem id x, s[0:1] = kernarg segment pointer.
	s_mov_b32 s2, exec_lo
	v_cmpx_gt_u32_e32 0x64, v0          ; disable lanes with tid >= 100 (0x64)
	s_cbranch_execz .LBB0_3             ; no lane in range -> skip loop entirely
	s_load_b32 s2, s[0:1], 0xc          ; hidden_group_size_x (kernarg offset 12)
	v_lshlrev_b32_e32 v1, 3, v0         ; byte offset = tid * 8 (sizeof(int *))
	s_getpc_b64 s[0:1]
	s_add_u32 s0, s0, prt@rel32@lo+4    ; s[0:1] = &prt (PC-relative)
	s_addc_u32 s1, s1, prt@rel32@hi+12
	v_mov_b32_e32 v3, 2                 ; value to store
	v_add_co_u32 v1, s0, v1, s0         ; v[1:2] = &prt[tid]
	s_delay_alu instid0(VALU_DEP_1)
	v_add_co_ci_u32_e64 v2, null, 0, s1, s0
	s_waitcnt lgkmcnt(0)                ; wait for the group-size scalar load
	s_and_b32 s1, s2, 0xffff            ; s1 = blockDim.x (16-bit field)
	s_mov_b32 s2, 0
	s_lshl_b32 s3, s1, 3                ; table stride = blockDim.x * 8 bytes
	s_mov_b32 s4, s2                    ; s4 = accumulated "lane finished" mask
.LBB0_2:                                ; block-stride loop body
	global_load_b64 v[4:5], v[1:2], off ; v[4:5] = prt[i] (64-bit device pointer)
	v_add_nc_u32_e32 v0, s1, v0         ; i += blockDim.x
	v_add_co_u32 v1, s0, v1, s3         ; advance &prt[i]
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
	v_add_co_ci_u32_e64 v2, s0, s2, v2, s0
	v_cmp_lt_u32_e32 vcc_lo, 0x63, v0   ; i > 99 -> this lane is done
	s_or_b32 s4, vcc_lo, s4
	s_waitcnt vmcnt(0)                  ; pointer load must complete first
	global_store_b32 v[4:5], v3, off    ; *(prt[i]) = 2
	s_and_not1_b32 exec_lo, exec_lo, s4 ; retire finished lanes
	s_cbranch_execnz .LBB0_2            ; loop while any lane remains active
.LBB0_3:
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10matrix_addv
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 256
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 5
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10matrix_addv, .Lfunc_end0-_Z10matrix_addv
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected prt
.type prt,@object
.section .bss,"aw",@nobits
.globl prt
.p2align 4, 0x0
prt:
.zero 800
.size prt, 800
.type __hip_cuid_,@object
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym prt
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: hidden_block_count_x
- .offset: 4
.size: 4
.value_kind: hidden_block_count_y
- .offset: 8
.size: 4
.value_kind: hidden_block_count_z
- .offset: 12
.size: 2
.value_kind: hidden_group_size_x
- .offset: 14
.size: 2
.value_kind: hidden_group_size_y
- .offset: 16
.size: 2
.value_kind: hidden_group_size_z
- .offset: 18
.size: 2
.value_kind: hidden_remainder_x
- .offset: 20
.size: 2
.value_kind: hidden_remainder_y
- .offset: 22
.size: 2
.value_kind: hidden_remainder_z
- .offset: 40
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 64
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 256
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10matrix_addv
.private_segment_fixed_size: 0
.sgpr_count: 7
.sgpr_spill_count: 0
.symbol: _Z10matrix_addv.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0004157d_00000000-6_constantMemTest.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z29__device_stub__Z10matrix_addvv
.type _Z29__device_stub__Z10matrix_addvv, @function
_Z29__device_stub__Z10matrix_addvv:
.LFB3695:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z10matrix_addv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z29__device_stub__Z10matrix_addvv, .-_Z29__device_stub__Z10matrix_addvv
.globl _Z10matrix_addv
.type _Z10matrix_addv, @function
_Z10matrix_addv:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z10matrix_addvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _Z10matrix_addv, .-_Z10matrix_addv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "host:%d,%d\n"
.LC1:
.string " "
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $856, %rsp
.cfi_def_cfa_offset 896
movq %fs:40, %rax
movq %rax, 840(%rsp)
xorl %eax, %eax
movl $5, %edi
call cudaSetDevice@PLT
movl $400, %edi
call malloc@PLT
movq %rax, %rbp
movl $4, %ecx
movl $8, %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rsp, %rdi
movl $400, %esi
call cudaMalloc@PLT
movq (%rsp), %rdx
leaq 32(%rsp), %rax
leaq 832(%rsp), %rcx
.L12:
movq %rdx, (%rax)
addq $4, %rdx
addq $8, %rax
cmpq %rcx, %rax
jne .L12
leaq 32(%rsp), %rsi
movl $1, %r8d
movl $0, %ecx
movl $800, %edx
leaq _ZL3prt(%rip), %rdi
call cudaMemcpyToSymbol@PLT
movl $256, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
movl $2, %ecx
movl $400, %edx
movq (%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movq %rbp, %rbx
addq $400, %rbp
leaq _ZSt4cout(%rip), %r13
leaq .LC1(%rip), %r12
.L14:
movl (%rbx), %esi
movq %r13, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
movq %r12, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L14
movq 840(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $856, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
call _Z29__device_stub__Z10matrix_addvv
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z10matrix_addv"
.LC3:
.string "prt"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3698:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z10matrix_addv(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $800, %r9d
movl $0, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _ZL3prt(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL3prt
.comm _ZL3prt,800,32
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "constantMemTest.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z25__device_stub__matrix_addv # -- Begin function _Z25__device_stub__matrix_addv
.p2align 4, 0x90
.type _Z25__device_stub__matrix_addv,@function
_Z25__device_stub__matrix_addv: # @_Z25__device_stub__matrix_addv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z10matrix_addv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z25__device_stub__matrix_addv, .Lfunc_end0-_Z25__device_stub__matrix_addv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $888, %rsp # imm = 0x378
.cfi_def_cfa_offset 912
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $5, %edi
callq hipSetDevice
movl $400, %edi # imm = 0x190
callq malloc
movq %rax, %rbx
xorl %r14d, %r14d
movl $.L.str, %edi
movl $8, %esi
movl $4, %edx
xorl %eax, %eax
callq printf
leaq 8(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hipMalloc
movq 8(%rsp), %rax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
leaq (%rax,%r14), %rcx
movq %rcx, 80(%rsp,%r14,2)
addq $4, %r14
cmpq $400, %r14 # imm = 0x190
jne .LBB1_1
# %bb.2:
leaq 80(%rsp), %rsi
movl $prt, %edi
movl $800, %edx # imm = 0x320
xorl %ecx, %ecx
movl $1, %r8d
callq hipMemcpyToSymbol
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 255(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z10matrix_addv, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq 8(%rsp), %rsi
movl $400, %edx # imm = 0x190
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movl (%rbx,%r14,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str.1, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r14
cmpq $100, %r14
jne .LBB1_5
# %bb.6:
xorl %eax, %eax
addq $888, %rsp # imm = 0x378
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10matrix_addv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $0, 8(%rsp)
movl $1, (%rsp)
movl $prt, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $800, %r9d # imm = 0x320
movq %rbx, %rdi
xorl %r8d, %r8d
callq __hipRegisterVar
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type prt,@object # @prt
.local prt
.comm prt,800,16
.type _Z10matrix_addv,@object # @_Z10matrix_addv
.section .rodata,"a",@progbits
.globl _Z10matrix_addv
.p2align 3, 0x0
_Z10matrix_addv:
.quad _Z25__device_stub__matrix_addv
.size _Z10matrix_addv, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "host:%d,%d\n"
.size .L.str, 12
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " "
.size .L.str.1, 2
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z10matrix_addv"
.size .L__unnamed_1, 16
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "prt"
.size .L__unnamed_2, 4
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__matrix_addv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym prt
.addrsig_sym _Z10matrix_addv
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
* This program uses the host CURAND API to generate 100
* pseudorandom floats.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
/*
 * Generate n = 100 pseudorandom floats on the GPU with the host CURAND API,
 * copy them back and print them.  CUDA_CALL / CURAND_CALL (defined above)
 * print file:line and make main return EXIT_FAILURE on the first failure.
 */
int main(int argc, char *argv[])
{
size_t n = 100;
size_t i;
curandGenerator_t gen;
float *devData, *hostData;
/* Allocate n floats on host */
/* NOTE(review): calloc result is not NULL-checked before use */
hostData = (float *)calloc(n, sizeof(float));
/* Allocate n floats on device */
CUDA_CALL(cudaMalloc((void **)&devData, n*sizeof(float)));
/* Create pseudo-random number generator */
CURAND_CALL(curandCreateGenerator(&gen,
CURAND_RNG_PSEUDO_DEFAULT));
/* Set seed (fixed constant, so the output is reproducible across runs) */
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen,
1234ULL));
/* Generate n floats on device */
CURAND_CALL(curandGenerateUniform(gen, devData, n));
/* Copy device memory to host */
CUDA_CALL(cudaMemcpy(hostData, devData, n * sizeof(float),
cudaMemcpyDeviceToHost));
/* Show result */
for(i = 0; i < n; i++) {
printf("%1.4f ", hostData[i]);
}
printf("\n");
/* Cleanup: destroy generator, free device then host memory */
CURAND_CALL(curandDestroyGenerator(gen));
CUDA_CALL(cudaFree(devData));
free(hostData);
return EXIT_SUCCESS;
} | code for sm_80
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
* This program uses the host CURAND API to generate 100
* pseudorandom floats.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
/*
 * Generate n = 100 pseudorandom floats on the GPU with the host CURAND API,
 * copy them back and print them.  CUDA_CALL / CURAND_CALL (defined above)
 * print file:line and make main return EXIT_FAILURE on the first failure.
 */
int main(int argc, char *argv[])
{
size_t n = 100;
size_t i;
curandGenerator_t gen;
float *devData, *hostData;
/* Allocate n floats on host */
/* NOTE(review): calloc result is not NULL-checked before use */
hostData = (float *)calloc(n, sizeof(float));
/* Allocate n floats on device */
CUDA_CALL(cudaMalloc((void **)&devData, n*sizeof(float)));
/* Create pseudo-random number generator */
CURAND_CALL(curandCreateGenerator(&gen,
CURAND_RNG_PSEUDO_DEFAULT));
/* Set seed (fixed constant, so the output is reproducible across runs) */
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen,
1234ULL));
/* Generate n floats on device */
CURAND_CALL(curandGenerateUniform(gen, devData, n));
/* Copy device memory to host */
CUDA_CALL(cudaMemcpy(hostData, devData, n * sizeof(float),
cudaMemcpyDeviceToHost));
/* Show result */
for(i = 0; i < n; i++) {
printf("%1.4f ", hostData[i]);
}
printf("\n");
/* Cleanup: destroy generator, free device then host memory */
CURAND_CALL(curandDestroyGenerator(gen));
CUDA_CALL(cudaFree(devData));
free(hostData);
return EXIT_SUCCESS;
} | .file "tmpxft_00136600_00000000-6_curand.cudafe1.cpp"
.text
#APP
#NO_APP
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
# atexit handler (registered in __sti____cudaRegisterAll): unregisters the
# CUDA fat binary whose handle was cached at program startup.
.LFB2060:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                        # realign rsp to 16 for the call below
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi # arg0 = cached fatbin handle
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2060:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "/home/ubuntu/Datasets/stackv2/train-structured/seungpyo/cuda_study/master/curand.cu"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Error at %s:%d\n"
.LC2:
.string "%1.4f "
.LC3:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movl $4, %esi
movl $100, %edi
call calloc@PLT
movq %rax, %r13
leaq 16(%rsp), %rdi
movl $400, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L16
leaq 8(%rsp), %rdi
movl $100, %esi
call curandCreateGenerator@PLT
testl %eax, %eax
jne .L17
movl $1234, %esi
movq 8(%rsp), %rdi
call curandSetPseudoRandomGeneratorSeed@PLT
testl %eax, %eax
jne .L18
movl $100, %edx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call curandGenerateUniform@PLT
testl %eax, %eax
jne .L19
movl $2, %ecx
movl $400, %edx
movq 16(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L20
movq %r13, %rbx
leaq 400(%r13), %r12
leaq .LC2(%rip), %rbp
.L10:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L10
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call curandDestroyGenerator@PLT
testl %eax, %eax
jne .L21
movq 16(%rsp), %rdi
call cudaFree@PLT
testl %eax, %eax
jne .L22
movq %r13, %rdi
call free@PLT
movl $0, %eax
jmp .L3
.L16:
movl $28, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
.L3:
movq 24(%rsp), %rdx
subq %fs:40, %rdx
jne .L23
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
movl $31, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L3
.L18:
movl $35, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L3
.L19:
movl $39, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L3
.L20:
movl $42, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L3
.L21:
movl $52, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L3
.L22:
movl $53, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L3
.L23:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
* This program uses the host CURAND API to generate 100
* pseudorandom floats.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
int main(int argc, char *argv[])
{
size_t n = 100;
size_t i;
curandGenerator_t gen;
float *devData, *hostData;
/* Allocate n floats on host */
hostData = (float *)calloc(n, sizeof(float));
/* Allocate n floats on device */
CUDA_CALL(cudaMalloc((void **)&devData, n*sizeof(float)));
/* Create pseudo-random number generator */
CURAND_CALL(curandCreateGenerator(&gen,
CURAND_RNG_PSEUDO_DEFAULT));
/* Set seed */
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen,
1234ULL));
/* Generate n floats on device */
CURAND_CALL(curandGenerateUniform(gen, devData, n));
/* Copy device memory to host */
CUDA_CALL(cudaMemcpy(hostData, devData, n * sizeof(float),
cudaMemcpyDeviceToHost));
/* Show result */
for(i = 0; i < n; i++) {
printf("%1.4f ", hostData[i]);
}
printf("\n");
/* Cleanup */
CURAND_CALL(curandDestroyGenerator(gen));
CUDA_CALL(cudaFree(devData));
free(hostData);
return EXIT_SUCCESS;
} | /*
* This program uses the host CURAND API to generate 100
* pseudorandom floats.
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#define CUDA_CALL(x) do { if((x)!=hipSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define CURAND_CALL(x) do { if((x)!=HIPRAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
int main(int argc, char *argv[])
{
size_t n = 100;
size_t i;
hiprandGenerator_t gen;
float *devData, *hostData;
/* Allocate n floats on host */
hostData = (float *)calloc(n, sizeof(float));
/* Allocate n floats on device */
CUDA_CALL(hipMalloc((void **)&devData, n*sizeof(float)));
/* Create pseudo-random number generator */
CURAND_CALL(hiprandCreateGenerator(&gen,
HIPRAND_RNG_PSEUDO_DEFAULT));
/* Set seed */
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(gen,
1234ULL));
/* Generate n floats on device */
CURAND_CALL(hiprandGenerateUniform(gen, devData, n));
/* Copy device memory to host */
CUDA_CALL(hipMemcpy(hostData, devData, n * sizeof(float),
hipMemcpyDeviceToHost));
/* Show result */
for(i = 0; i < n; i++) {
printf("%1.4f ", hostData[i]);
}
printf("\n");
/* Cleanup */
CURAND_CALL(hiprandDestroyGenerator(gen));
CUDA_CALL(hipFree(devData));
free(hostData);
return EXIT_SUCCESS;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
* This program uses the host CURAND API to generate 100
* pseudorandom floats.
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#define CUDA_CALL(x) do { if((x)!=hipSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define CURAND_CALL(x) do { if((x)!=HIPRAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
int main(int argc, char *argv[])
{
size_t n = 100;
size_t i;
hiprandGenerator_t gen;
float *devData, *hostData;
/* Allocate n floats on host */
hostData = (float *)calloc(n, sizeof(float));
/* Allocate n floats on device */
CUDA_CALL(hipMalloc((void **)&devData, n*sizeof(float)));
/* Create pseudo-random number generator */
CURAND_CALL(hiprandCreateGenerator(&gen,
HIPRAND_RNG_PSEUDO_DEFAULT));
/* Set seed */
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(gen,
1234ULL));
/* Generate n floats on device */
CURAND_CALL(hiprandGenerateUniform(gen, devData, n));
/* Copy device memory to host */
CUDA_CALL(hipMemcpy(hostData, devData, n * sizeof(float),
hipMemcpyDeviceToHost));
/* Show result */
for(i = 0; i < n; i++) {
printf("%1.4f ", hostData[i]);
}
printf("\n");
/* Cleanup */
CURAND_CALL(hiprandDestroyGenerator(gen));
CUDA_CALL(hipFree(devData));
free(hostData);
return EXIT_SUCCESS;
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
* This program uses the host CURAND API to generate 100
* pseudorandom floats.
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#define CUDA_CALL(x) do { if((x)!=hipSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define CURAND_CALL(x) do { if((x)!=HIPRAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
int main(int argc, char *argv[])
{
size_t n = 100;
size_t i;
hiprandGenerator_t gen;
float *devData, *hostData;
/* Allocate n floats on host */
hostData = (float *)calloc(n, sizeof(float));
/* Allocate n floats on device */
CUDA_CALL(hipMalloc((void **)&devData, n*sizeof(float)));
/* Create pseudo-random number generator */
CURAND_CALL(hiprandCreateGenerator(&gen,
HIPRAND_RNG_PSEUDO_DEFAULT));
/* Set seed */
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(gen,
1234ULL));
/* Generate n floats on device */
CURAND_CALL(hiprandGenerateUniform(gen, devData, n));
/* Copy device memory to host */
CUDA_CALL(hipMemcpy(hostData, devData, n * sizeof(float),
hipMemcpyDeviceToHost));
/* Show result */
for(i = 0; i < n; i++) {
printf("%1.4f ", hostData[i]);
}
printf("\n");
/* Cleanup */
CURAND_CALL(hiprandDestroyGenerator(gen));
CUDA_CALL(hipFree(devData));
free(hostData);
return EXIT_SUCCESS;
} | .text
.file "curand.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $24, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $100, %edi
movl $4, %esi
callq calloc
movq %rax, %rbx
leaq 8(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hipMalloc
testl %eax, %eax
je .LBB0_3
# %bb.1:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $28, %edx
jmp .LBB0_2
.LBB0_3:
leaq 16(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hiprandCreateGenerator
testl %eax, %eax
je .LBB0_5
# %bb.4:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $32, %edx
jmp .LBB0_2
.LBB0_5:
movq 16(%rsp), %rdi
movl $1234, %esi # imm = 0x4D2
callq hiprandSetPseudoRandomGeneratorSeed
testl %eax, %eax
je .LBB0_7
# %bb.6:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $36, %edx
jmp .LBB0_2
.LBB0_7:
movq 16(%rsp), %rdi
movq 8(%rsp), %rsi
movl $100, %edx
callq hiprandGenerateUniform
testl %eax, %eax
je .LBB0_9
# %bb.8:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $39, %edx
jmp .LBB0_2
.LBB0_9:
movq 8(%rsp), %rsi
movl $400, %edx # imm = 0x190
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB0_10
# %bb.18:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $43, %edx
jmp .LBB0_2
.LBB0_10: # %.preheader.preheader
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB0_11: # %.preheader
# =>This Inner Loop Header: Depth=1
movss (%rbx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
incq %r14
cmpq $100, %r14
jne .LBB0_11
# %bb.12:
movl $10, %edi
callq putchar@PLT
movq 16(%rsp), %rdi
callq hiprandDestroyGenerator
testl %eax, %eax
je .LBB0_14
# %bb.13:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $52, %edx
jmp .LBB0_2
.LBB0_14:
movq 8(%rsp), %rdi
callq hipFree
testl %eax, %eax
je .LBB0_16
# %bb.15:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $53, %edx
.LBB0_2:
xorl %eax, %eax
callq printf
movl $1, %eax
.LBB0_17:
addq $24, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB0_16:
.cfi_def_cfa_offset 48
movq %rbx, %rdi
callq free
xorl %eax, %eax
jmp .LBB0_17
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error at %s:%d\n"
.size .L.str, 16
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/seungpyo/cuda_study/master/curand.hip"
.size .L.str.1, 95
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%1.4f "
.size .L.str.2, 7
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00136600_00000000-6_curand.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "/home/ubuntu/Datasets/stackv2/train-structured/seungpyo/cuda_study/master/curand.cu"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Error at %s:%d\n"
.LC2:
.string "%1.4f "
.LC3:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movl $4, %esi
movl $100, %edi
call calloc@PLT
movq %rax, %r13
leaq 16(%rsp), %rdi
movl $400, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L16
leaq 8(%rsp), %rdi
movl $100, %esi
call curandCreateGenerator@PLT
testl %eax, %eax
jne .L17
movl $1234, %esi
movq 8(%rsp), %rdi
call curandSetPseudoRandomGeneratorSeed@PLT
testl %eax, %eax
jne .L18
movl $100, %edx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call curandGenerateUniform@PLT
testl %eax, %eax
jne .L19
movl $2, %ecx
movl $400, %edx
movq 16(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L20
movq %r13, %rbx
leaq 400(%r13), %r12
leaq .LC2(%rip), %rbp
.L10:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L10
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call curandDestroyGenerator@PLT
testl %eax, %eax
jne .L21
movq 16(%rsp), %rdi
call cudaFree@PLT
testl %eax, %eax
jne .L22
movq %r13, %rdi
call free@PLT
movl $0, %eax
jmp .L3
.L16:
movl $28, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
.L3:
movq 24(%rsp), %rdx
subq %fs:40, %rdx
jne .L23
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
movl $31, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L3
.L18:
movl $35, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L3
.L19:
movl $39, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L3
.L20:
movl $42, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L3
.L21:
movl $52, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L3
.L22:
movl $53, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L3
.L23:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "curand.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $24, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $100, %edi
movl $4, %esi
callq calloc
movq %rax, %rbx
leaq 8(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hipMalloc
testl %eax, %eax
je .LBB0_3
# %bb.1:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $28, %edx
jmp .LBB0_2
.LBB0_3:
leaq 16(%rsp), %rdi
movl $400, %esi # imm = 0x190
callq hiprandCreateGenerator
testl %eax, %eax
je .LBB0_5
# %bb.4:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $32, %edx
jmp .LBB0_2
.LBB0_5:
movq 16(%rsp), %rdi
movl $1234, %esi # imm = 0x4D2
callq hiprandSetPseudoRandomGeneratorSeed
testl %eax, %eax
je .LBB0_7
# %bb.6:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $36, %edx
jmp .LBB0_2
.LBB0_7:
movq 16(%rsp), %rdi
movq 8(%rsp), %rsi
movl $100, %edx
callq hiprandGenerateUniform
testl %eax, %eax
je .LBB0_9
# %bb.8:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $39, %edx
jmp .LBB0_2
.LBB0_9:
movq 8(%rsp), %rsi
movl $400, %edx # imm = 0x190
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB0_10
# %bb.18:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $43, %edx
jmp .LBB0_2
.LBB0_10: # %.preheader.preheader
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB0_11: # %.preheader
# =>This Inner Loop Header: Depth=1
movss (%rbx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
incq %r14
cmpq $100, %r14
jne .LBB0_11
# %bb.12:
movl $10, %edi
callq putchar@PLT
movq 16(%rsp), %rdi
callq hiprandDestroyGenerator
testl %eax, %eax
je .LBB0_14
# %bb.13:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $52, %edx
jmp .LBB0_2
.LBB0_14:
movq 8(%rsp), %rdi
callq hipFree
testl %eax, %eax
je .LBB0_16
# %bb.15:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $53, %edx
.LBB0_2:
xorl %eax, %eax
callq printf
movl $1, %eax
.LBB0_17:
addq $24, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB0_16:
.cfi_def_cfa_offset 48
movq %rbx, %rdi
callq free
xorl %eax, %eax
jmp .LBB0_17
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error at %s:%d\n"
.size .L.str, 16
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/seungpyo/cuda_study/master/curand.hip"
.size .L.str.1, 95
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%1.4f "
.size .L.str.2, 7
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include <cmath>
#include <cuda.h>
#include <chrono>
using namespace std;
using namespace std::chrono;
__global__ void sum(int *arr, int *sumArr, int n){
int start = blockIdx.x * 256;
int sum = 0;
for(int i = start; i < min(start+256,n); i++){
sum += arr[i];
}
sumArr[blockIdx.x] = sum;
}
__global__ void pMin(int *arr, int *minArr, int n){
int start = blockIdx.x * 256;
int minVal = 9999999;
for(int i = start; i < min(start+256,n); i++){
if(arr[i] < minVal){minVal = arr[i];}
}
minArr[blockIdx.x] = minVal;
}
//These 2 functions are for standard deviation
__global__ void standardDeviation(float *arr, float* calcArr, float mean, int n){
int start = blockIdx.x * 256;
float aggregate = 0;
for(int i = start; i < min(start + 256, n); i++){
aggregate = aggregate + ((arr[i] - mean) *(arr[i] - mean));
}
calcArr[blockIdx.x] = aggregate;
}
__global__ void addAll(float *arr, float *calcArr, int n){
int start = blockIdx.x * 256;
float sum = 0;
for(int i = start; i < min(start + 256, n); i++){
sum += arr[i];
}
calcArr[blockIdx.x] = sum;
}
__global__ void pMax(float *arr, float *maxArr, int n){
int start = blockIdx.x * 256;
int maxm = -9999999;
for(int i = start; i < min(start+256,n); i++){
if(arr[i] > maxm){
maxm = arr[i];
}
}
maxArr[blockIdx.x] = maxm;
}
int main(){
int n;
cout << "Enter number of elements: ";
cin >> n;
int *hostArr,*devArr,*devSumArr, *devMinArr;
hostArr = new int[n];
for(int i = 0; i < n; i++){
hostArr[i] = i+1;
}
cudaMalloc(&devArr, n * 4);
int blocks = ceil(n * 1.0f/256.0f);
cudaMalloc(&devSumArr, blocks * 4);
cudaMemcpy(devArr, hostArr, n*4, cudaMemcpyHostToDevice);
//Sum
int curr = n;
auto start = high_resolution_clock::now();
while(curr > 1){
sum<<<blocks, 1>>>(devArr, devSumArr, curr);
cudaMemcpy(devArr,devSumArr,blocks*4,cudaMemcpyDeviceToDevice);
curr = ceil(curr* 1.0f/256.0f);
blocks = ceil(curr*1.0f/256.0f);
}
auto stop = high_resolution_clock::now();
cout << "parallel: " << duration_cast<microseconds>(stop - start).count() << endl;
start = high_resolution_clock::now();
long myVal = 0;
for(int i = 0; i < n; i++){
myVal = myVal + hostArr[i];
}
stop = high_resolution_clock::now();
cout << "serial: " << duration_cast<microseconds>(stop - start).count() << endl;
int sum;
cudaMemcpy(&sum,devArr,4,cudaMemcpyDeviceToHost);
cout << "Sum: " << sum << endl;
//Min value i
cudaMalloc(&devMinArr, blocks * 4);
//Refill device array with values of host array
cudaMemcpy(devArr,hostArr,n*4,cudaMemcpyHostToDevice);
curr = n;
blocks = ceil(n * 1.0f/256.0f);
while(curr > 1){
pMin<<<blocks, 1>>>(devArr, devMinArr, curr);
cudaMemcpy(devArr, devMinArr, blocks*4, cudaMemcpyDeviceToDevice);
curr = blocks;
blocks = ceil(curr*1.0f/256.0f);
}
int minVal;
cudaMemcpy(&minVal, devArr, 4, cudaMemcpyDeviceToHost);
cout << "min val: " << minVal << endl;
float mean = (float)sum/n;
//Standard deviation
float *fDevArr, *fStdSum;
blocks = ceil(n * 1.0f/ 256.0f);
//Create an aggregate array
cudaMalloc(&fStdSum, sizeof(float) * blocks);
//Copy mean's value to gpu mean
float *floatHost = new float[n];
for(int i = 0; i < n; i++){
floatHost[i] = (float)hostArr[i];
}
//Allocate device array space in gpu
cudaMalloc(&fDevArr, sizeof(float) * n);
//Refill device array with values of host array
cudaMemcpy(fDevArr,floatHost, 4 * n, cudaMemcpyHostToDevice);
standardDeviation<<<blocks, 1>>>(fDevArr, fStdSum, mean, n);
float *myArr = new float[blocks];
cudaMemcpy(myArr, fStdSum, sizeof(float) * blocks, cudaMemcpyDeviceToHost);
float total = 0;
for(int i = 0; i < blocks; i++){
total += myArr[i];
}
total /= n;
total = sqrt(total);
cout << "validation standard deviation: " << total << endl;
curr = blocks;
while(curr > 1){
cudaMemcpy(fDevArr, fStdSum, curr * sizeof(float), cudaMemcpyDeviceToDevice);
blocks = ceil(curr * 1.0f/256.0f);
cout << "blocks for aggregation: " << blocks << endl;
addAll<<<blocks, 1>>>(fDevArr,fStdSum, curr);
curr = blocks;
}
float stdDeviation;
cudaMemcpy(&stdDeviation, fStdSum, sizeof(float), cudaMemcpyDeviceToHost);
stdDeviation /= n;
stdDeviation = sqrt(stdDeviation);
cout << "Standard deviation: " << stdDeviation << endl;
float *intermediateMax;
blocks = ceil(n * 1.0f/256.0f);
cudaMalloc(&intermediateMax, blocks * sizeof(float));
cudaMemcpy(fDevArr,floatHost, 4 * n, cudaMemcpyHostToDevice);
curr = n;
while(curr > 1){
pMax<<<blocks, 1>>>(fDevArr, intermediateMax, curr);
cudaMemcpy(fDevArr, intermediateMax, blocks*sizeof(float), cudaMemcpyDeviceToDevice);
float *tempArr = new float[blocks];
cudaMemcpy(tempArr, intermediateMax, blocks*sizeof(float), cudaMemcpyDeviceToHost);
cout << "Intermediate maximum values: ";
for(int i = 0; i < blocks; i++){
cout << tempArr[i] << " ";
}
cout << endl;
curr = blocks;
blocks = ceil(curr * 1.0f/256.0f);
}
float maxm = 0;
cudaMemcpy(&maxm, intermediateMax, sizeof(float), cudaMemcpyDeviceToHost);
cout << "Maximum: " << maxm << endl;
} | .file "tmpxft_000423d8_00000000-6_parallelreduction.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3773:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3773:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z25__device_stub__Z3sumPiS_iPiS_i
.type _Z25__device_stub__Z3sumPiS_iPiS_i, @function
_Z25__device_stub__Z3sumPiS_iPiS_i:
.LFB3795:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3sumPiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3795:
.size _Z25__device_stub__Z3sumPiS_iPiS_i, .-_Z25__device_stub__Z3sumPiS_iPiS_i
.globl _Z3sumPiS_i
.type _Z3sumPiS_i, @function
_Z3sumPiS_i:
.LFB3796:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z3sumPiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3796:
.size _Z3sumPiS_i, .-_Z3sumPiS_i
.globl _Z26__device_stub__Z4pMinPiS_iPiS_i
.type _Z26__device_stub__Z4pMinPiS_iPiS_i, @function
_Z26__device_stub__Z4pMinPiS_iPiS_i:
.LFB3797:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4pMinPiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3797:
.size _Z26__device_stub__Z4pMinPiS_iPiS_i, .-_Z26__device_stub__Z4pMinPiS_iPiS_i
.globl _Z4pMinPiS_i
.type _Z4pMinPiS_i, @function
_Z4pMinPiS_i:
.LFB3798:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z4pMinPiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3798:
.size _Z4pMinPiS_i, .-_Z4pMinPiS_i
.globl _Z41__device_stub__Z17standardDeviationPfS_fiPfS_fi
.type _Z41__device_stub__Z17standardDeviationPfS_fiPfS_fi, @function
_Z41__device_stub__Z17standardDeviationPfS_fiPfS_fi:
.LFB3799:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movss %xmm0, 12(%rsp)
movl %edx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z17standardDeviationPfS_fi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3799:
.size _Z41__device_stub__Z17standardDeviationPfS_fiPfS_fi, .-_Z41__device_stub__Z17standardDeviationPfS_fiPfS_fi
.globl _Z17standardDeviationPfS_fi
.type _Z17standardDeviationPfS_fi, @function
_Z17standardDeviationPfS_fi:
.LFB3800:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z17standardDeviationPfS_fiPfS_fi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3800:
.size _Z17standardDeviationPfS_fi, .-_Z17standardDeviationPfS_fi
.globl _Z28__device_stub__Z6addAllPfS_iPfS_i
.type _Z28__device_stub__Z6addAllPfS_iPfS_i, @function
_Z28__device_stub__Z6addAllPfS_iPfS_i:
.LFB3801:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6addAllPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3801:
.size _Z28__device_stub__Z6addAllPfS_iPfS_i, .-_Z28__device_stub__Z6addAllPfS_iPfS_i
.globl _Z6addAllPfS_i
.type _Z6addAllPfS_i, @function
_Z6addAllPfS_i:
.LFB3802:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z6addAllPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3802:
.size _Z6addAllPfS_i, .-_Z6addAllPfS_i
.globl _Z26__device_stub__Z4pMaxPfS_iPfS_i
.type _Z26__device_stub__Z4pMaxPfS_iPfS_i, @function
_Z26__device_stub__Z4pMaxPfS_iPfS_i:
.LFB3803:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L39
.L35:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4pMaxPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L35
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3803:
.size _Z26__device_stub__Z4pMaxPfS_iPfS_i, .-_Z26__device_stub__Z4pMaxPfS_iPfS_i
.globl _Z4pMaxPfS_i
.type _Z4pMaxPfS_i, @function
_Z4pMaxPfS_i:
.LFB3804:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z4pMaxPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3804:
.size _Z4pMaxPfS_i, .-_Z4pMaxPfS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Enter number of elements: "
.LC6:
.string "parallel: "
.LC7:
.string "serial: "
.LC8:
.string "Sum: "
.LC9:
.string "min val: "
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC10:
.string "validation standard deviation: "
.section .rodata.str1.1
.LC11:
.string "blocks for aggregation: "
.LC12:
.string "Standard deviation: "
.LC13:
.string "Intermediate maximum values: "
.LC14:
.string " "
.LC15:
.string "Maximum: "
.text
.globl main
.type main, @function
main:
.LFB3768:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
leaq 16(%rsp), %rsi
leaq _ZSt3cin(%rip), %rdi
call _ZNSirsERi@PLT
movslq 16(%rsp), %rax
movabsq $2305843009213693950, %rdx
cmpq %rax, %rdx
jb .L44
leaq 0(,%rax,4), %rdi
call _Znam@PLT
movq %rax, %rbp
movl 16(%rsp), %esi
movslq %esi, %rcx
movl $1, %eax
testl %esi, %esi
jle .L46
.L48:
movl %eax, -4(%rbp,%rax,4)
movq %rax, %rdx
addq $1, %rax
cmpq %rdx, %rcx
jne .L48
.L46:
sall $2, %esi
movslq %esi, %rsi
leaq 32(%rsp), %rdi
call cudaMalloc@PLT
pxor %xmm0, %xmm0
cvtsi2ssl 16(%rsp), %xmm0
mulss .LC2(%rip), %xmm0
movaps %xmm0, %xmm3
movss .LC16(%rip), %xmm2
movaps %xmm0, %xmm1
andps %xmm2, %xmm1
movss .LC3(%rip), %xmm4
ucomiss %xmm1, %xmm4
jbe .L49
cvttss2sil %xmm0, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
cmpnless %xmm1, %xmm3
movss .LC5(%rip), %xmm4
andps %xmm4, %xmm3
addss %xmm1, %xmm3
andnps %xmm0, %xmm2
orps %xmm2, %xmm3
.L49:
cvttss2sil %xmm3, %ebx
leal 0(,%rbx,4), %esi
movslq %esi, %rsi
leaq 40(%rsp), %rdi
call cudaMalloc@PLT
movl 16(%rsp), %eax
leal 0(,%rax,4), %edx
movslq %edx, %rdx
movl $1, %ecx
movq %rbp, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl 16(%rsp), %r12d
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %r13
cmpl $1, %r12d
jg .L54
.L50:
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %r12
leaq .LC6(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
subq %r13, %r12
movabsq $2361183241434822607, %rdx
movq %r12, %rax
imulq %rdx
sarq $7, %rdx
sarq $63, %r12
movq %rdx, %rsi
subq %r12, %rsi
call _ZNSo9_M_insertIlEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %r13
movl 16(%rsp), %edx
testl %edx, %edx
jle .L55
movl $0, %eax
.L56:
addl $1, %eax
cmpl %edx, %eax
jne .L56
.L55:
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movq %rax, %r12
leaq .LC7(%rip), %rsi
leaq _ZSt4cout(%rip), %r14
movq %r14, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
subq %r13, %r12
movabsq $2361183241434822607, %rdx
movq %r12, %rax
imulq %rdx
sarq $7, %rdx
sarq $63, %r12
subq %r12, %rdx
movq %rdx, %rsi
call _ZNSo9_M_insertIlEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq 20(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 32(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC8(%rip), %rsi
movq %r14, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 20(%rsp), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leal 0(,%rbx,4), %esi
movslq %esi, %rsi
leaq 48(%rsp), %rdi
call cudaMalloc@PLT
movl 16(%rsp), %eax
leal 0(,%rax,4), %edx
movslq %edx, %rdx
movl $1, %ecx
movq %rbp, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl 16(%rsp), %r12d
pxor %xmm0, %xmm0
cvtsi2ssl %r12d, %xmm0
mulss .LC2(%rip), %xmm0
movaps %xmm0, %xmm3
movss .LC16(%rip), %xmm2
movaps %xmm0, %xmm1
andps %xmm2, %xmm1
movss .LC3(%rip), %xmm4
ucomiss %xmm1, %xmm4
jbe .L57
cvttss2sil %xmm0, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
cmpnless %xmm1, %xmm3
movss .LC5(%rip), %xmm4
andps %xmm4, %xmm3
addss %xmm1, %xmm3
andnps %xmm0, %xmm2
orps %xmm2, %xmm3
.L57:
cvttss2sil %xmm3, %ebx
cmpl $1, %r12d
jg .L61
.L58:
leaq 24(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 32(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC9(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl 24(%rsp), %esi
call _ZNSolsEi@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movl 20(%rsp), %r15d
pxor %xmm5, %xmm5
cvtsi2ssl 16(%rsp), %xmm5
movss %xmm5, 12(%rsp)
movaps %xmm5, %xmm0
mulss .LC2(%rip), %xmm0
movaps %xmm0, %xmm3
movss .LC16(%rip), %xmm2
movaps %xmm0, %xmm1
andps %xmm2, %xmm1
movss .LC3(%rip), %xmm4
ucomiss %xmm1, %xmm4
jbe .L62
cvttss2sil %xmm0, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
cmpnless %xmm1, %xmm3
movss .LC5(%rip), %xmm4
andps %xmm4, %xmm3
addss %xmm1, %xmm3
andnps %xmm0, %xmm2
orps %xmm2, %xmm3
.L62:
cvttss2sil %xmm3, %ebx
movslq %ebx, %r14
leaq 0(,%r14,4), %r13
leaq 64(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
movslq 16(%rsp), %rax
movabsq $2305843009213693950, %rdx
cmpq %rax, %rdx
jb .L63
leaq 0(,%rax,4), %rdi
call _Znam@PLT
movq %rax, %r12
movl 16(%rsp), %ecx
movslq %ecx, %rdx
salq $2, %rdx
movl $0, %eax
testl %ecx, %ecx
jle .L65
.L67:
pxor %xmm0, %xmm0
cvtsi2ssl 0(%rbp,%rax), %xmm0
movss %xmm0, (%r12,%rax)
addq $4, %rax
cmpq %rax, %rdx
jne .L67
.L65:
movslq %ecx, %rcx
leaq 0(,%rcx,4), %rsi
leaq 56(%rsp), %rdi
call cudaMalloc@PLT
movl 16(%rsp), %eax
leal 0(,%rax,4), %edx
movslq %edx, %rdx
movl $1, %ecx
movq %r12, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl %ebx, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L123
.L68:
movabsq $2305843009213693950, %rax
cmpq %r14, %rax
jb .L69
movq %r13, %rdi
call _Znam@PLT
movq %rax, %rbp
movl $2, %ecx
movq %r13, %rdx
movq 64(%rsp), %rsi
movq %rax, %rdi
call cudaMemcpy@PLT
movq %rbp, %rax
leaq 0(%rbp,%r13), %rdx
pxor %xmm0, %xmm0
testl %ebx, %ebx
jle .L124
.L73:
addss (%rax), %xmm0
addq $4, %rax
cmpq %rdx, %rax
jne .L73
pxor %xmm1, %xmm1
cvtsi2ssl 16(%rsp), %xmm1
divss %xmm1, %xmm0
pxor %xmm1, %xmm1
ucomiss %xmm0, %xmm1
ja .L119
.L102:
sqrtss %xmm0, %xmm0
movss %xmm0, 12(%rsp)
.L76:
leaq .LC10(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
cmpl $1, %ebx
jg .L84
.L77:
leaq 28(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 64(%rsp), %rsi
call cudaMemcpy@PLT
pxor %xmm1, %xmm1
cvtsi2ssl 16(%rsp), %xmm1
movss 28(%rsp), %xmm0
divss %xmm1, %xmm0
pxor %xmm1, %xmm1
ucomiss %xmm0, %xmm1
ja .L120
sqrtss %xmm0, %xmm0
.L87:
movss %xmm0, 28(%rsp)
leaq .LC12(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 28(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
pxor %xmm0, %xmm0
cvtsi2ssl 16(%rsp), %xmm0
mulss .LC2(%rip), %xmm0
movaps %xmm0, %xmm3
movss .LC16(%rip), %xmm2
movaps %xmm0, %xmm1
andps %xmm2, %xmm1
movss .LC3(%rip), %xmm4
ucomiss %xmm1, %xmm4
jbe .L88
cvttss2sil %xmm0, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
cmpnless %xmm1, %xmm3
movss .LC5(%rip), %xmm4
andps %xmm4, %xmm3
addss %xmm1, %xmm3
andnps %xmm0, %xmm2
orps %xmm2, %xmm3
.L88:
cvttss2sil %xmm3, %r14d
movslq %r14d, %rsi
salq $2, %rsi
leaq 72(%rsp), %rdi
call cudaMalloc@PLT
movl 16(%rsp), %eax
leal 0(,%rax,4), %edx
movslq %edx, %rdx
movl $1, %ecx
movq %r12, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl 16(%rsp), %ebx
cmpl $1, %ebx
jg .L101
.L89:
movl $0x00000000, 92(%rsp)
leaq 92(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 72(%rsp), %rsi
call cudaMemcpy@PLT
leaq .LC15(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 92(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L125
movl $0, %eax
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L44:
.cfi_restore_state
movq 104(%rsp), %rax
subq %fs:40, %rax
je .L47
call __stack_chk_fail@PLT
.L47:
call __cxa_throw_bad_array_new_length@PLT
.L126:
movl %r12d, %edx
movq 40(%rsp), %rsi
movq 32(%rsp), %rdi
call _Z25__device_stub__Z3sumPiS_iPiS_i
jmp .L51
.L53:
cvttss2sil %xmm2, %ebx
cmpl $1, %r12d
jle .L50
.L54:
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl %ebx, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L126
.L51:
leal 0(,%rbx,4), %edx
movslq %edx, %rdx
movl $3, %ecx
movq 40(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %r12d, %xmm0
mulss .LC2(%rip), %xmm0
movaps %xmm0, %xmm2
movss .LC16(%rip), %xmm1
andps %xmm0, %xmm1
movss .LC3(%rip), %xmm5
ucomiss %xmm1, %xmm5
jbe .L52
cvttss2sil %xmm0, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
cmpnless %xmm1, %xmm2
movss .LC5(%rip), %xmm3
andps %xmm3, %xmm2
addss %xmm2, %xmm1
movss .LC16(%rip), %xmm2
andnps %xmm0, %xmm2
orps %xmm1, %xmm2
.L52:
cvttss2sil %xmm2, %r12d
pxor %xmm0, %xmm0
cvtsi2ssl %r12d, %xmm0
mulss .LC2(%rip), %xmm0
movaps %xmm0, %xmm2
movss .LC16(%rip), %xmm1
andps %xmm0, %xmm1
movss .LC3(%rip), %xmm3
ucomiss %xmm1, %xmm3
jbe .L53
cvttss2sil %xmm0, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
cmpnless %xmm1, %xmm2
movss .LC5(%rip), %xmm3
andps %xmm3, %xmm2
addss %xmm2, %xmm1
movss .LC16(%rip), %xmm2
andnps %xmm0, %xmm2
orps %xmm1, %xmm2
jmp .L53
.L127:
movl %r12d, %edx
movq 48(%rsp), %rsi
movq 32(%rsp), %rdi
call _Z26__device_stub__Z4pMinPiS_iPiS_i
jmp .L59
.L60:
cvttss2sil %xmm2, %eax
movl %ebx, %r12d
cmpl $1, %ebx
jle .L58
movl %eax, %ebx
.L61:
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl %ebx, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L127
.L59:
leal 0(,%rbx,4), %edx
movslq %edx, %rdx
movl $3, %ecx
movq 48(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %ebx, %xmm0
mulss .LC2(%rip), %xmm0
movaps %xmm0, %xmm2
movss .LC16(%rip), %xmm1
andps %xmm0, %xmm1
movss .LC3(%rip), %xmm6
ucomiss %xmm1, %xmm6
jbe .L60
cvttss2sil %xmm0, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
cmpnless %xmm1, %xmm2
movss .LC5(%rip), %xmm5
andps %xmm5, %xmm2
addss %xmm2, %xmm1
movss .LC16(%rip), %xmm2
andnps %xmm0, %xmm2
orps %xmm1, %xmm2
jmp .L60
.L63:
movq 104(%rsp), %rax
subq %fs:40, %rax
je .L66
call __stack_chk_fail@PLT
.L66:
call __cxa_throw_bad_array_new_length@PLT
.L123:
pxor %xmm0, %xmm0
cvtsi2ssl %r15d, %xmm0
divss 12(%rsp), %xmm0
movl 16(%rsp), %edx
movq 64(%rsp), %rsi
movq 56(%rsp), %rdi
call _Z41__device_stub__Z17standardDeviationPfS_fiPfS_fi
jmp .L68
.L69:
movq 104(%rsp), %rax
subq %fs:40, %rax
je .L72
call __stack_chk_fail@PLT
.L72:
call __cxa_throw_bad_array_new_length@PLT
.L119:
call sqrtf@PLT
movss %xmm0, 12(%rsp)
jmp .L76
.L78:
movl %ebx, %r13d
cvttss2sil %xmm2, %ebx
movl $24, %edx
leaq .LC11(%rip), %rsi
leaq _ZSt4cout(%rip), %rbp
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %ebx, %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbp,%rax), %r14
testq %r14, %r14
je .L128
cmpb $0, 56(%r14)
je .L81
movzbl 67(%r14), %esi
.L82:
movsbl %sil, %esi
movq %rbp, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl %ebx, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L129
.L83:
cmpl $1, %ebx
jle .L77
.L84:
movslq %ebx, %rdx
salq $2, %rdx
movl $3, %ecx
movq 64(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %ebx, %xmm0
mulss .LC2(%rip), %xmm0
movaps %xmm0, %xmm2
movss .LC16(%rip), %xmm1
andps %xmm0, %xmm1
movss .LC3(%rip), %xmm7
ucomiss %xmm1, %xmm7
jbe .L78
cvttss2sil %xmm0, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
cmpnless %xmm1, %xmm2
movss .LC5(%rip), %xmm6
andps %xmm6, %xmm2
addss %xmm2, %xmm1
movss .LC16(%rip), %xmm2
andnps %xmm0, %xmm2
orps %xmm1, %xmm2
jmp .L78
.L128:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L130
call _ZSt16__throw_bad_castv@PLT
.L130:
call __stack_chk_fail@PLT
.L81:
movq %r14, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r14), %rax
movl $10, %esi
movq %r14, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L82
.L129:
movl %r13d, %edx
movq 64(%rsp), %rsi
movq 56(%rsp), %rdi
call _Z28__device_stub__Z6addAllPfS_iPfS_i
jmp .L83
.L120:
call sqrtf@PLT
jmp .L87
.L132:
movl %ebx, %edx
movq 72(%rsp), %rsi
movq 56(%rsp), %rdi
call _Z26__device_stub__Z4pMaxPfS_iPfS_i
jmp .L90
.L91:
movq 104(%rsp), %rax
subq %fs:40, %rax
je .L94
call __stack_chk_fail@PLT
.L94:
call __cxa_throw_bad_array_new_length@PLT
.L133:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L131
call _ZSt16__throw_bad_castv@PLT
.L131:
call __stack_chk_fail@PLT
.L98:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
jmp .L99
.L100:
cvttss2sil %xmm3, %eax
movl %r14d, %ebx
cmpl $1, %r14d
jle .L89
movl %eax, %r14d
.L101:
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl %r14d, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L132
.L90:
movslq %r14d, %rbx
leaq 0(,%rbx,4), %r12
movl $3, %ecx
movq %r12, %rdx
movq 72(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movabsq $2305843009213693950, %rax
cmpq %rbx, %rax
jb .L91
movq %r12, %rdi
call _Znam@PLT
movq %rax, %rbp
movl $2, %ecx
movq %r12, %rdx
movq 72(%rsp), %rsi
movq %rax, %rdi
call cudaMemcpy@PLT
movl $29, %edx
leaq .LC13(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movq %rbp, %rbx
addq %r12, %rbp
leaq _ZSt4cout(%rip), %r13
leaq .LC14(%rip), %r12
testl %r14d, %r14d
jle .L93
.L95:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r13, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
movl $1, %edx
movq %r12, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %rbx, %rbp
jne .L95
.L93:
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
leaq _ZSt4cout(%rip), %rdx
movq 240(%rdx,%rax), %rbx
testq %rbx, %rbx
je .L133
cmpb $0, 56(%rbx)
je .L98
movzbl 67(%rbx), %esi
.L99:
movsbl %sil, %esi
leaq _ZSt4cout(%rip), %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %r14d, %xmm0
mulss .LC2(%rip), %xmm0
movaps %xmm0, %xmm3
movss .LC4(%rip), %xmm2
movaps %xmm0, %xmm1
andps %xmm2, %xmm1
movss .LC3(%rip), %xmm4
ucomiss %xmm1, %xmm4
jbe .L100
cvttss2sil %xmm0, %eax
pxor %xmm1, %xmm1
cvtsi2ssl %eax, %xmm1
cmpnless %xmm1, %xmm3
movss .LC5(%rip), %xmm7
andps %xmm7, %xmm3
addss %xmm1, %xmm3
andnps %xmm0, %xmm2
orps %xmm2, %xmm3
jmp .L100
.L124:
pxor %xmm1, %xmm1
cvtsi2ssl 16(%rsp), %xmm1
pxor %xmm0, %xmm0
divss %xmm1, %xmm0
jmp .L102
.L125:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3768:
.size main, .-main
.section .rodata.str1.1
.LC17:
.string "_Z4pMaxPfS_i"
.LC18:
.string "_Z6addAllPfS_i"
.LC19:
.string "_Z17standardDeviationPfS_fi"
.LC20:
.string "_Z4pMinPiS_i"
.LC21:
.string "_Z3sumPiS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3806:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC17(%rip), %rdx
movq %rdx, %rcx
leaq _Z4pMaxPfS_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC18(%rip), %rdx
movq %rdx, %rcx
leaq _Z6addAllPfS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _Z17standardDeviationPfS_fi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC20(%rip), %rdx
movq %rdx, %rcx
leaq _Z4pMinPiS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC21(%rip), %rdx
movq %rdx, %rcx
leaq _Z3sumPiS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3806:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC2:
.long 998244352
.align 4
.LC3:
.long 1258291200
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC4:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst4
.align 4
.LC5:
.long 1065353216
.set .LC16,.LC4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <cmath>
#include <cuda.h>
#include <chrono>
using namespace std;
using namespace std::chrono;
__global__ void sum(int *arr, int *sumArr, int n){
int start = blockIdx.x * 256;
int sum = 0;
for(int i = start; i < min(start+256,n); i++){
sum += arr[i];
}
sumArr[blockIdx.x] = sum;
}
__global__ void pMin(int *arr, int *minArr, int n){
int start = blockIdx.x * 256;
int minVal = 9999999;
for(int i = start; i < min(start+256,n); i++){
if(arr[i] < minVal){minVal = arr[i];}
}
minArr[blockIdx.x] = minVal;
}
//These 2 functions are for standard deviation
__global__ void standardDeviation(float *arr, float* calcArr, float mean, int n){
int start = blockIdx.x * 256;
float aggregate = 0;
for(int i = start; i < min(start + 256, n); i++){
aggregate = aggregate + ((arr[i] - mean) *(arr[i] - mean));
}
calcArr[blockIdx.x] = aggregate;
}
__global__ void addAll(float *arr, float *calcArr, int n){
int start = blockIdx.x * 256;
float sum = 0;
for(int i = start; i < min(start + 256, n); i++){
sum += arr[i];
}
calcArr[blockIdx.x] = sum;
}
__global__ void pMax(float *arr, float *maxArr, int n){
int start = blockIdx.x * 256;
int maxm = -9999999;
for(int i = start; i < min(start+256,n); i++){
if(arr[i] > maxm){
maxm = arr[i];
}
}
maxArr[blockIdx.x] = maxm;
}
int main(){
int n;
cout << "Enter number of elements: ";
cin >> n;
int *hostArr,*devArr,*devSumArr, *devMinArr;
hostArr = new int[n];
for(int i = 0; i < n; i++){
hostArr[i] = i+1;
}
cudaMalloc(&devArr, n * 4);
int blocks = ceil(n * 1.0f/256.0f);
cudaMalloc(&devSumArr, blocks * 4);
cudaMemcpy(devArr, hostArr, n*4, cudaMemcpyHostToDevice);
//Sum
int curr = n;
auto start = high_resolution_clock::now();
while(curr > 1){
sum<<<blocks, 1>>>(devArr, devSumArr, curr);
cudaMemcpy(devArr,devSumArr,blocks*4,cudaMemcpyDeviceToDevice);
curr = ceil(curr* 1.0f/256.0f);
blocks = ceil(curr*1.0f/256.0f);
}
auto stop = high_resolution_clock::now();
cout << "parallel: " << duration_cast<microseconds>(stop - start).count() << endl;
start = high_resolution_clock::now();
long myVal = 0;
for(int i = 0; i < n; i++){
myVal = myVal + hostArr[i];
}
stop = high_resolution_clock::now();
cout << "serial: " << duration_cast<microseconds>(stop - start).count() << endl;
int sum;
cudaMemcpy(&sum,devArr,4,cudaMemcpyDeviceToHost);
cout << "Sum: " << sum << endl;
//Min value i
cudaMalloc(&devMinArr, blocks * 4);
//Refill device array with values of host array
cudaMemcpy(devArr,hostArr,n*4,cudaMemcpyHostToDevice);
curr = n;
blocks = ceil(n * 1.0f/256.0f);
while(curr > 1){
pMin<<<blocks, 1>>>(devArr, devMinArr, curr);
cudaMemcpy(devArr, devMinArr, blocks*4, cudaMemcpyDeviceToDevice);
curr = blocks;
blocks = ceil(curr*1.0f/256.0f);
}
int minVal;
cudaMemcpy(&minVal, devArr, 4, cudaMemcpyDeviceToHost);
cout << "min val: " << minVal << endl;
float mean = (float)sum/n;
//Standard deviation
float *fDevArr, *fStdSum;
blocks = ceil(n * 1.0f/ 256.0f);
//Create an aggregate array
cudaMalloc(&fStdSum, sizeof(float) * blocks);
//Copy mean's value to gpu mean
float *floatHost = new float[n];
for(int i = 0; i < n; i++){
floatHost[i] = (float)hostArr[i];
}
//Allocate device array space in gpu
cudaMalloc(&fDevArr, sizeof(float) * n);
//Refill device array with values of host array
cudaMemcpy(fDevArr,floatHost, 4 * n, cudaMemcpyHostToDevice);
standardDeviation<<<blocks, 1>>>(fDevArr, fStdSum, mean, n);
float *myArr = new float[blocks];
cudaMemcpy(myArr, fStdSum, sizeof(float) * blocks, cudaMemcpyDeviceToHost);
float total = 0;
for(int i = 0; i < blocks; i++){
total += myArr[i];
}
total /= n;
total = sqrt(total);
cout << "validation standard deviation: " << total << endl;
curr = blocks;
while(curr > 1){
cudaMemcpy(fDevArr, fStdSum, curr * sizeof(float), cudaMemcpyDeviceToDevice);
blocks = ceil(curr * 1.0f/256.0f);
cout << "blocks for aggregation: " << blocks << endl;
addAll<<<blocks, 1>>>(fDevArr,fStdSum, curr);
curr = blocks;
}
float stdDeviation;
cudaMemcpy(&stdDeviation, fStdSum, sizeof(float), cudaMemcpyDeviceToHost);
stdDeviation /= n;
stdDeviation = sqrt(stdDeviation);
cout << "Standard deviation: " << stdDeviation << endl;
float *intermediateMax;
blocks = ceil(n * 1.0f/256.0f);
cudaMalloc(&intermediateMax, blocks * sizeof(float));
cudaMemcpy(fDevArr,floatHost, 4 * n, cudaMemcpyHostToDevice);
curr = n;
while(curr > 1){
pMax<<<blocks, 1>>>(fDevArr, intermediateMax, curr);
cudaMemcpy(fDevArr, intermediateMax, blocks*sizeof(float), cudaMemcpyDeviceToDevice);
float *tempArr = new float[blocks];
cudaMemcpy(tempArr, intermediateMax, blocks*sizeof(float), cudaMemcpyDeviceToHost);
cout << "Intermediate maximum values: ";
for(int i = 0; i < blocks; i++){
cout << tempArr[i] << " ";
}
cout << endl;
curr = blocks;
blocks = ceil(curr * 1.0f/256.0f);
}
float maxm = 0;
cudaMemcpy(&maxm, intermediateMax, sizeof(float), cudaMemcpyDeviceToHost);
cout << "Maximum: " << maxm << endl;
} | #include <iostream>
#include <cmath>
#include <hip/hip_runtime.h>
#include <chrono>
using namespace std;
using namespace std::chrono;
__global__ void sum(int *arr, int *sumArr, int n){
int start = blockIdx.x * 256;
int sum = 0;
for(int i = start; i < min(start+256,n); i++){
sum += arr[i];
}
sumArr[blockIdx.x] = sum;
}
__global__ void pMin(int *arr, int *minArr, int n){
int start = blockIdx.x * 256;
int minVal = 9999999;
for(int i = start; i < min(start+256,n); i++){
if(arr[i] < minVal){minVal = arr[i];}
}
minArr[blockIdx.x] = minVal;
}
//These 2 functions are for standard deviation
__global__ void standardDeviation(float *arr, float* calcArr, float mean, int n){
int start = blockIdx.x * 256;
float aggregate = 0;
for(int i = start; i < min(start + 256, n); i++){
aggregate = aggregate + ((arr[i] - mean) *(arr[i] - mean));
}
calcArr[blockIdx.x] = aggregate;
}
__global__ void addAll(float *arr, float *calcArr, int n){
int start = blockIdx.x * 256;
float sum = 0;
for(int i = start; i < min(start + 256, n); i++){
sum += arr[i];
}
calcArr[blockIdx.x] = sum;
}
__global__ void pMax(float *arr, float *maxArr, int n){
int start = blockIdx.x * 256;
int maxm = -9999999;
for(int i = start; i < min(start+256,n); i++){
if(arr[i] > maxm){
maxm = arr[i];
}
}
maxArr[blockIdx.x] = maxm;
}
int main(){
int n;
cout << "Enter number of elements: ";
cin >> n;
int *hostArr,*devArr,*devSumArr, *devMinArr;
hostArr = new int[n];
for(int i = 0; i < n; i++){
hostArr[i] = i+1;
}
hipMalloc(&devArr, n * 4);
int blocks = ceil(n * 1.0f/256.0f);
hipMalloc(&devSumArr, blocks * 4);
hipMemcpy(devArr, hostArr, n*4, hipMemcpyHostToDevice);
//Sum
int curr = n;
auto start = high_resolution_clock::now();
while(curr > 1){
sum<<<blocks, 1>>>(devArr, devSumArr, curr);
hipMemcpy(devArr,devSumArr,blocks*4,hipMemcpyDeviceToDevice);
curr = ceil(curr* 1.0f/256.0f);
blocks = ceil(curr*1.0f/256.0f);
}
auto stop = high_resolution_clock::now();
cout << "parallel: " << duration_cast<microseconds>(stop - start).count() << endl;
start = high_resolution_clock::now();
long myVal = 0;
for(int i = 0; i < n; i++){
myVal = myVal + hostArr[i];
}
stop = high_resolution_clock::now();
cout << "serial: " << duration_cast<microseconds>(stop - start).count() << endl;
int sum;
hipMemcpy(&sum,devArr,4,hipMemcpyDeviceToHost);
cout << "Sum: " << sum << endl;
//Min value i
hipMalloc(&devMinArr, blocks * 4);
//Refill device array with values of host array
hipMemcpy(devArr,hostArr,n*4,hipMemcpyHostToDevice);
curr = n;
blocks = ceil(n * 1.0f/256.0f);
while(curr > 1){
pMin<<<blocks, 1>>>(devArr, devMinArr, curr);
hipMemcpy(devArr, devMinArr, blocks*4, hipMemcpyDeviceToDevice);
curr = blocks;
blocks = ceil(curr*1.0f/256.0f);
}
int minVal;
hipMemcpy(&minVal, devArr, 4, hipMemcpyDeviceToHost);
cout << "min val: " << minVal << endl;
float mean = (float)sum/n;
//Standard deviation
float *fDevArr, *fStdSum;
blocks = ceil(n * 1.0f/ 256.0f);
//Create an aggregate array
hipMalloc(&fStdSum, sizeof(float) * blocks);
//Copy mean's value to gpu mean
float *floatHost = new float[n];
for(int i = 0; i < n; i++){
floatHost[i] = (float)hostArr[i];
}
//Allocate device array space in gpu
hipMalloc(&fDevArr, sizeof(float) * n);
//Refill device array with values of host array
hipMemcpy(fDevArr,floatHost, 4 * n, hipMemcpyHostToDevice);
standardDeviation<<<blocks, 1>>>(fDevArr, fStdSum, mean, n);
float *myArr = new float[blocks];
hipMemcpy(myArr, fStdSum, sizeof(float) * blocks, hipMemcpyDeviceToHost);
float total = 0;
for(int i = 0; i < blocks; i++){
total += myArr[i];
}
total /= n;
total = sqrt(total);
cout << "validation standard deviation: " << total << endl;
curr = blocks;
while(curr > 1){
hipMemcpy(fDevArr, fStdSum, curr * sizeof(float), hipMemcpyDeviceToDevice);
blocks = ceil(curr * 1.0f/256.0f);
cout << "blocks for aggregation: " << blocks << endl;
addAll<<<blocks, 1>>>(fDevArr,fStdSum, curr);
curr = blocks;
}
float stdDeviation;
hipMemcpy(&stdDeviation, fStdSum, sizeof(float), hipMemcpyDeviceToHost);
stdDeviation /= n;
stdDeviation = sqrt(stdDeviation);
cout << "Standard deviation: " << stdDeviation << endl;
float *intermediateMax;
blocks = ceil(n * 1.0f/256.0f);
hipMalloc(&intermediateMax, blocks * sizeof(float));
hipMemcpy(fDevArr,floatHost, 4 * n, hipMemcpyHostToDevice);
curr = n;
while(curr > 1){
pMax<<<blocks, 1>>>(fDevArr, intermediateMax, curr);
hipMemcpy(fDevArr, intermediateMax, blocks*sizeof(float), hipMemcpyDeviceToDevice);
float *tempArr = new float[blocks];
hipMemcpy(tempArr, intermediateMax, blocks*sizeof(float), hipMemcpyDeviceToHost);
cout << "Intermediate maximum values: ";
for(int i = 0; i < blocks; i++){
cout << tempArr[i] << " ";
}
cout << endl;
curr = blocks;
blocks = ceil(curr * 1.0f/256.0f);
}
float maxm = 0;
hipMemcpy(&maxm, intermediateMax, sizeof(float), hipMemcpyDeviceToHost);
cout << "Maximum: " << maxm << endl;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <iostream>
#include <cmath>
#include <hip/hip_runtime.h>
#include <chrono>
using namespace std;
using namespace std::chrono;
__global__ void sum(int *arr, int *sumArr, int n){
int start = blockIdx.x * 256;
int sum = 0;
for(int i = start; i < min(start+256,n); i++){
sum += arr[i];
}
sumArr[blockIdx.x] = sum;
}
__global__ void pMin(int *arr, int *minArr, int n){
int start = blockIdx.x * 256;
int minVal = 9999999;
for(int i = start; i < min(start+256,n); i++){
if(arr[i] < minVal){minVal = arr[i];}
}
minArr[blockIdx.x] = minVal;
}
//These 2 functions are for standard deviation
__global__ void standardDeviation(float *arr, float* calcArr, float mean, int n){
int start = blockIdx.x * 256;
float aggregate = 0;
for(int i = start; i < min(start + 256, n); i++){
aggregate = aggregate + ((arr[i] - mean) *(arr[i] - mean));
}
calcArr[blockIdx.x] = aggregate;
}
__global__ void addAll(float *arr, float *calcArr, int n){
int start = blockIdx.x * 256;
float sum = 0;
for(int i = start; i < min(start + 256, n); i++){
sum += arr[i];
}
calcArr[blockIdx.x] = sum;
}
__global__ void pMax(float *arr, float *maxArr, int n){
int start = blockIdx.x * 256;
int maxm = -9999999;
for(int i = start; i < min(start+256,n); i++){
if(arr[i] > maxm){
maxm = arr[i];
}
}
maxArr[blockIdx.x] = maxm;
}
int main(){
int n;
cout << "Enter number of elements: ";
cin >> n;
int *hostArr,*devArr,*devSumArr, *devMinArr;
hostArr = new int[n];
for(int i = 0; i < n; i++){
hostArr[i] = i+1;
}
hipMalloc(&devArr, n * 4);
int blocks = ceil(n * 1.0f/256.0f);
hipMalloc(&devSumArr, blocks * 4);
hipMemcpy(devArr, hostArr, n*4, hipMemcpyHostToDevice);
//Sum
int curr = n;
auto start = high_resolution_clock::now();
while(curr > 1){
sum<<<blocks, 1>>>(devArr, devSumArr, curr);
hipMemcpy(devArr,devSumArr,blocks*4,hipMemcpyDeviceToDevice);
curr = ceil(curr* 1.0f/256.0f);
blocks = ceil(curr*1.0f/256.0f);
}
auto stop = high_resolution_clock::now();
cout << "parallel: " << duration_cast<microseconds>(stop - start).count() << endl;
start = high_resolution_clock::now();
long myVal = 0;
for(int i = 0; i < n; i++){
myVal = myVal + hostArr[i];
}
stop = high_resolution_clock::now();
cout << "serial: " << duration_cast<microseconds>(stop - start).count() << endl;
int sum;
hipMemcpy(&sum,devArr,4,hipMemcpyDeviceToHost);
cout << "Sum: " << sum << endl;
//Min value i
hipMalloc(&devMinArr, blocks * 4);
//Refill device array with values of host array
hipMemcpy(devArr,hostArr,n*4,hipMemcpyHostToDevice);
curr = n;
blocks = ceil(n * 1.0f/256.0f);
while(curr > 1){
pMin<<<blocks, 1>>>(devArr, devMinArr, curr);
hipMemcpy(devArr, devMinArr, blocks*4, hipMemcpyDeviceToDevice);
curr = blocks;
blocks = ceil(curr*1.0f/256.0f);
}
int minVal;
hipMemcpy(&minVal, devArr, 4, hipMemcpyDeviceToHost);
cout << "min val: " << minVal << endl;
float mean = (float)sum/n;
//Standard deviation
float *fDevArr, *fStdSum;
blocks = ceil(n * 1.0f/ 256.0f);
//Create an aggregate array
hipMalloc(&fStdSum, sizeof(float) * blocks);
//Copy mean's value to gpu mean
float *floatHost = new float[n];
for(int i = 0; i < n; i++){
floatHost[i] = (float)hostArr[i];
}
//Allocate device array space in gpu
hipMalloc(&fDevArr, sizeof(float) * n);
//Refill device array with values of host array
hipMemcpy(fDevArr,floatHost, 4 * n, hipMemcpyHostToDevice);
standardDeviation<<<blocks, 1>>>(fDevArr, fStdSum, mean, n);
float *myArr = new float[blocks];
hipMemcpy(myArr, fStdSum, sizeof(float) * blocks, hipMemcpyDeviceToHost);
float total = 0;
for(int i = 0; i < blocks; i++){
total += myArr[i];
}
total /= n;
total = sqrt(total);
cout << "validation standard deviation: " << total << endl;
curr = blocks;
while(curr > 1){
hipMemcpy(fDevArr, fStdSum, curr * sizeof(float), hipMemcpyDeviceToDevice);
blocks = ceil(curr * 1.0f/256.0f);
cout << "blocks for aggregation: " << blocks << endl;
addAll<<<blocks, 1>>>(fDevArr,fStdSum, curr);
curr = blocks;
}
float stdDeviation;
hipMemcpy(&stdDeviation, fStdSum, sizeof(float), hipMemcpyDeviceToHost);
stdDeviation /= n;
stdDeviation = sqrt(stdDeviation);
cout << "Standard deviation: " << stdDeviation << endl;
float *intermediateMax;
blocks = ceil(n * 1.0f/256.0f);
hipMalloc(&intermediateMax, blocks * sizeof(float));
hipMemcpy(fDevArr,floatHost, 4 * n, hipMemcpyHostToDevice);
curr = n;
while(curr > 1){
pMax<<<blocks, 1>>>(fDevArr, intermediateMax, curr);
hipMemcpy(fDevArr, intermediateMax, blocks*sizeof(float), hipMemcpyDeviceToDevice);
float *tempArr = new float[blocks];
hipMemcpy(tempArr, intermediateMax, blocks*sizeof(float), hipMemcpyDeviceToHost);
cout << "Intermediate maximum values: ";
for(int i = 0; i < blocks; i++){
cout << tempArr[i] << " ";
}
cout << endl;
curr = blocks;
blocks = ceil(curr * 1.0f/256.0f);
}
float maxm = 0;
hipMemcpy(&maxm, intermediateMax, sizeof(float), hipMemcpyDeviceToHost);
cout << "Maximum: " << maxm << endl;
}
	.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
;------------------------------------------------------------------------
; _Z3sumPiS_i — device code for __global__ void sum(int *arr, int *sumArr,
; int n), target gfx1100 (wave32).
; Kernarg segment (s[0:1] = base): +0x0 arr, +0x8 sumArr, +0x10 n.
; s15 = workgroup_id.x (15 user SGPRs precede it per the kernel descriptor).
; One workgroup serially sums arr[wg*256 .. min(wg*256+256, n)-1] and
; stores the partial sum to sumArr[wg].
;------------------------------------------------------------------------
	.protected	_Z3sumPiS_i
	.globl	_Z3sumPiS_i
	.p2align	8
	.type	_Z3sumPiS_i,@function
_Z3sumPiS_i:
	s_load_b32 s3, s[0:1], 0x10             ; s3 = n
	s_lshl_b32 s4, s15, 8                   ; s4 = start = workgroup_id.x * 256
	s_mov_b32 s2, s15                       ; keep workgroup id for the final store
	s_mov_b32 s5, 0                         ; sum = 0
	s_waitcnt lgkmcnt(0)
	s_cmp_ge_i32 s4, s3                     ; empty slice? skip the loop
	s_cbranch_scc1 .LBB0_3
	s_load_b64 s[6:7], s[0:1], 0x0          ; s[6:7] = arr
	s_ashr_i32 s5, s4, 31                   ; sign-extend start for 64-bit scaling
	s_add_i32 s10, s4, 0x100
	s_lshl_b64 s[8:9], s[4:5], 2            ; byte offset = start * 4
	s_min_i32 s3, s10, s3                   ; loop bound = min(start + 256, n)
	s_mov_b32 s5, 0                         ; sum = 0 again (s5 was reused above)
	s_waitcnt lgkmcnt(0)
	s_add_u32 s6, s6, s8                    ; s[6:7] = &arr[start]
	s_addc_u32 s7, s7, s9
.LBB0_2:                                        ; accumulation loop, one int per trip
	s_load_b32 s8, s[6:7], 0x0
	s_add_i32 s4, s4, 1
	s_waitcnt lgkmcnt(0)
	s_add_i32 s5, s8, s5                    ; sum += arr[i]
	s_add_u32 s6, s6, 4                     ; advance element pointer
	s_addc_u32 s7, s7, 0
	s_cmp_ge_i32 s4, s3
	s_cbranch_scc0 .LBB0_2
.LBB0_3:                                        ; sumArr[workgroup_id.x] = sum
	s_load_b64 s[0:1], s[0:1], 0x8          ; s[0:1] = sumArr
	s_mov_b32 s3, 0
	v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s5
	s_lshl_b64 s[2:3], s[2:3], 2            ; byte offset = workgroup_id.x * 4
	s_waitcnt lgkmcnt(0)
	s_add_u32 s0, s0, s2
	s_addc_u32 s1, s1, s3
	global_store_b32 v0, v1, s[0:1]
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3sumPiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 16
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3sumPiS_i, .Lfunc_end0-_Z3sumPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
;------------------------------------------------------------------------
; _Z4pMinPiS_i — device code for __global__ void pMin(int*, int*, int),
; gfx1100 (wave32).  Same slice scheme as _Z3sumPiS_i but folds with
; s_min_i32; the accumulator starts at 0x98967f (= 9999999, the magic
; sentinel used in the HIP source, also the result for an empty slice).
;------------------------------------------------------------------------
	.protected	_Z4pMinPiS_i
	.globl	_Z4pMinPiS_i
	.p2align	8
	.type	_Z4pMinPiS_i,@function
_Z4pMinPiS_i:
	s_load_b32 s3, s[0:1], 0x10             ; s3 = n
	s_lshl_b32 s4, s15, 8                   ; start = workgroup_id.x * 256
	s_mov_b32 s2, s15                       ; keep workgroup id for the final store
	s_mov_b32 s5, 0x98967f                  ; minVal = 9999999
	s_waitcnt lgkmcnt(0)
	s_cmp_ge_i32 s4, s3                     ; empty slice: store the sentinel
	s_cbranch_scc1 .LBB1_3
	s_load_b64 s[6:7], s[0:1], 0x0          ; s[6:7] = arr
	s_ashr_i32 s5, s4, 31                   ; sign-extend start for 64-bit scaling
	s_add_i32 s10, s4, 0x100
	s_lshl_b64 s[8:9], s[4:5], 2            ; byte offset = start * 4
	s_min_i32 s3, s10, s3                   ; loop bound = min(start + 256, n)
	s_mov_b32 s5, 0x98967f                  ; re-seed minVal (s5 was reused above)
	s_waitcnt lgkmcnt(0)
	s_add_u32 s6, s6, s8                    ; &arr[start]
	s_addc_u32 s7, s7, s9
.LBB1_2:                                        ; minVal = min(arr[i], minVal)
	s_load_b32 s8, s[6:7], 0x0
	s_add_i32 s4, s4, 1
	s_waitcnt lgkmcnt(0)
	s_min_i32 s5, s8, s5
	s_add_u32 s6, s6, 4                     ; advance element pointer
	s_addc_u32 s7, s7, 0
	s_cmp_ge_i32 s4, s3
	s_cbranch_scc0 .LBB1_2
.LBB1_3:                                        ; minArr[workgroup_id.x] = minVal
	s_load_b64 s[0:1], s[0:1], 0x8          ; s[0:1] = minArr
	s_mov_b32 s3, 0
	v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s5
	s_lshl_b64 s[2:3], s[2:3], 2            ; byte offset = workgroup_id.x * 4
	s_waitcnt lgkmcnt(0)
	s_add_u32 s0, s0, s2
	s_addc_u32 s1, s1, s3
	global_store_b32 v0, v1, s[0:1]
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4pMinPiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 16
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z4pMinPiS_i, .Lfunc_end1-_Z4pMinPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
;------------------------------------------------------------------------
; _Z17standardDeviationPfS_fi — device code for
; __global__ void standardDeviation(float*, float*, float mean, int n),
; gfx1100 (wave32).  Kernarg: +0x0 arr, +0x8 calcArr, +0x10 mean, +0x14 n.
; Accumulates sum((arr[i]-mean)^2) over the workgroup's 256-element slice
; (v_fmac fuses the multiply-add) and stores it to calcArr[workgroup_id.x].
;------------------------------------------------------------------------
	.protected	_Z17standardDeviationPfS_fi
	.globl	_Z17standardDeviationPfS_fi
	.p2align	8
	.type	_Z17standardDeviationPfS_fi,@function
_Z17standardDeviationPfS_fi:
	s_load_b32 s6, s[0:1], 0x14             ; s6 = n
	v_mov_b32_e32 v0, 0                     ; aggregate = 0.0f
	s_lshl_b32 s4, s15, 8                   ; start = workgroup_id.x * 256
	s_mov_b32 s2, s15                       ; keep workgroup id for the final store
	s_waitcnt lgkmcnt(0)
	s_cmp_ge_i32 s4, s6                     ; empty slice? store 0.0f
	s_cbranch_scc1 .LBB2_3
	s_clause 0x1
	s_load_b64 s[8:9], s[0:1], 0x0          ; s[8:9] = arr
	s_load_b32 s3, s[0:1], 0x10             ; s3 = mean
	s_ashr_i32 s5, s4, 31                   ; sign-extend start for 64-bit scaling
	s_add_i32 s7, s4, 0x100
	s_lshl_b64 s[10:11], s[4:5], 2          ; byte offset = start * 4
	v_mov_b32_e32 v0, 0
	s_min_i32 s5, s7, s6                    ; loop bound = min(start + 256, n)
	s_waitcnt lgkmcnt(0)
	s_add_u32 s6, s8, s10                   ; &arr[start]
	s_addc_u32 s7, s9, s11
.LBB2_2:                                        ; aggregate += (arr[i]-mean)^2
	s_load_b32 s8, s[6:7], 0x0
	s_add_i32 s4, s4, 1
	s_add_u32 s6, s6, 4                     ; advance element pointer
	s_addc_u32 s7, s7, 0
	s_cmp_ge_i32 s4, s5
	s_waitcnt lgkmcnt(0)
	v_sub_f32_e64 v1, s8, s3                ; diff = arr[i] - mean
	s_delay_alu instid0(VALU_DEP_1)
	v_fmac_f32_e32 v0, v1, v1               ; aggregate = fma(diff, diff, aggregate)
	s_cbranch_scc0 .LBB2_2
.LBB2_3:                                        ; calcArr[workgroup_id.x] = aggregate
	s_load_b64 s[0:1], s[0:1], 0x8          ; s[0:1] = calcArr
	s_mov_b32 s3, 0
	v_mov_b32_e32 v1, 0
	s_lshl_b64 s[2:3], s[2:3], 2            ; byte offset = workgroup_id.x * 4
	s_waitcnt lgkmcnt(0)
	s_add_u32 s0, s0, s2
	s_addc_u32 s1, s1, s3
	global_store_b32 v1, v0, s[0:1]
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17standardDeviationPfS_fi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 16
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z17standardDeviationPfS_fi, .Lfunc_end2-_Z17standardDeviationPfS_fi
.section .AMDGPU.csdata,"",@progbits
.text
;------------------------------------------------------------------------
; _Z6addAllPfS_i — device code for __global__ void addAll(float*, float*, int),
; gfx1100 (wave32).  Float analogue of _Z3sumPiS_i: serially adds the
; workgroup's 256-element slice of arr and stores the partial total to
; calcArr[workgroup_id.x].  Kernarg: +0x0 arr, +0x8 calcArr, +0x10 n.
;------------------------------------------------------------------------
	.protected	_Z6addAllPfS_i
	.globl	_Z6addAllPfS_i
	.p2align	8
	.type	_Z6addAllPfS_i,@function
_Z6addAllPfS_i:
	s_load_b32 s3, s[0:1], 0x10             ; s3 = n
	v_mov_b32_e32 v0, 0                     ; sum = 0.0f
	s_lshl_b32 s4, s15, 8                   ; start = workgroup_id.x * 256
	s_mov_b32 s2, s15                       ; keep workgroup id for the final store
	s_waitcnt lgkmcnt(0)
	s_cmp_ge_i32 s4, s3                     ; empty slice? store 0.0f
	s_cbranch_scc1 .LBB3_3
	s_load_b64 s[6:7], s[0:1], 0x0          ; s[6:7] = arr
	s_ashr_i32 s5, s4, 31                   ; sign-extend start for 64-bit scaling
	s_add_i32 s10, s4, 0x100
	s_lshl_b64 s[8:9], s[4:5], 2            ; byte offset = start * 4
	v_mov_b32_e32 v0, 0
	s_min_i32 s3, s10, s3                   ; loop bound = min(start + 256, n)
	s_waitcnt lgkmcnt(0)
	s_add_u32 s6, s6, s8                    ; &arr[start]
	s_addc_u32 s7, s7, s9
.LBB3_2:                                        ; sum += arr[i]
	s_load_b32 s5, s[6:7], 0x0
	s_add_i32 s4, s4, 1
	s_add_u32 s6, s6, 4                     ; advance element pointer
	s_addc_u32 s7, s7, 0
	s_cmp_ge_i32 s4, s3
	s_waitcnt lgkmcnt(0)
	v_add_f32_e32 v0, s5, v0
	s_cbranch_scc0 .LBB3_2
.LBB3_3:                                        ; calcArr[workgroup_id.x] = sum
	s_load_b64 s[0:1], s[0:1], 0x8          ; s[0:1] = calcArr
	s_mov_b32 s3, 0
	v_mov_b32_e32 v1, 0
	s_lshl_b64 s[2:3], s[2:3], 2            ; byte offset = workgroup_id.x * 4
	s_waitcnt lgkmcnt(0)
	s_add_u32 s0, s0, s2
	s_addc_u32 s1, s1, s3
	global_store_b32 v1, v0, s[0:1]
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6addAllPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 16
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z6addAllPfS_i, .Lfunc_end3-_Z6addAllPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
;------------------------------------------------------------------------
; _Z4pMaxPfS_i — device code for __global__ void pMax(float*, float*, int),
; gfx1100 (wave32).  Mirrors the HIP source's `int maxm` accumulator: each
; kept float candidate is truncated to int (v_cvt_i32_f32), so fractional
; parts are dropped.  Seed is int -9999999 (0xff676981); the empty-slice
; path stores float -9999999.0 (0xcb18967f) directly.
;------------------------------------------------------------------------
	.protected	_Z4pMaxPfS_i
	.globl	_Z4pMaxPfS_i
	.p2align	8
	.type	_Z4pMaxPfS_i,@function
_Z4pMaxPfS_i:
	s_load_b32 s3, s[0:1], 0x10             ; s3 = n
	v_mov_b32_e32 v0, 0xcb18967f            ; float -9999999.0 (empty-slice result)
	s_lshl_b32 s4, s15, 8                   ; start = workgroup_id.x * 256
	s_mov_b32 s2, s15                       ; keep workgroup id for the final store
	s_waitcnt lgkmcnt(0)
	s_cmp_ge_i32 s4, s3
	s_cbranch_scc1 .LBB4_4
	s_load_b64 s[6:7], s[0:1], 0x0          ; s[6:7] = arr
	s_ashr_i32 s5, s4, 31                   ; sign-extend start for 64-bit scaling
	s_add_i32 s10, s4, 0x100
	s_lshl_b64 s[8:9], s[4:5], 2            ; byte offset = start * 4
	v_mov_b32_e32 v0, 0xff676981            ; int maxm = -9999999
	s_min_i32 s3, s10, s3                   ; loop bound = min(start + 256, n)
	s_waitcnt lgkmcnt(0)
	s_add_u32 s6, s6, s8                    ; &arr[start]
	s_addc_u32 s7, s7, s9
.LBB4_2:                                        ; if (arr[i] > (float)maxm) maxm = (int)arr[i]
	s_load_b32 s5, s[6:7], 0x0
	s_delay_alu instid0(VALU_DEP_1)
	v_cvt_f32_i32_e32 v1, v0                ; (float)maxm for the compare
	s_add_i32 s4, s4, 1
	s_add_u32 s6, s6, 4                     ; advance element pointer
	s_addc_u32 s7, s7, 0
	s_cmp_ge_i32 s4, s3
	s_waitcnt lgkmcnt(0)
	v_cvt_i32_f32_e32 v2, s5                ; candidate truncated to int
	v_cmp_gt_f32_e32 vcc_lo, s5, v1
	s_delay_alu instid0(VALU_DEP_2)
	v_cndmask_b32_e32 v0, v0, v2, vcc_lo    ; keep larger candidate
	s_cbranch_scc0 .LBB4_2
	s_delay_alu instid0(VALU_DEP_1)
	v_cvt_f32_i32_e32 v0, v0                ; final int result back to float
.LBB4_4:                                        ; maxArr[workgroup_id.x] = result
	s_load_b64 s[0:1], s[0:1], 0x8          ; s[0:1] = maxArr
	s_mov_b32 s3, 0
	v_mov_b32_e32 v1, 0
	s_lshl_b64 s[2:3], s[2:3], 2            ; byte offset = workgroup_id.x * 4
	s_waitcnt lgkmcnt(0)
	s_add_u32 s0, s0, s2
	s_addc_u32 s1, s1, s3
	global_store_b32 v1, v0, s[0:1]
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4pMaxPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end4:
.size _Z4pMaxPfS_i, .Lfunc_end4-_Z4pMaxPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3sumPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 16
.sgpr_spill_count: 0
.symbol: _Z3sumPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4pMinPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 16
.sgpr_spill_count: 0
.symbol: _Z4pMinPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17standardDeviationPfS_fi
.private_segment_fixed_size: 0
.sgpr_count: 16
.sgpr_spill_count: 0
.symbol: _Z17standardDeviationPfS_fi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6addAllPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 16
.sgpr_spill_count: 0
.symbol: _Z6addAllPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4pMaxPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z4pMaxPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
#include <iostream>
#include <cfloat>
#include <chrono>
#include <climits>
#include <cmath>
#include <hip/hip_runtime.h>
using namespace std;
using namespace std::chrono;
//Each block reduces its own 256-element slice of arr: the partial sum of
//arr[blockIdx.x*256 .. min(blockIdx.x*256+256, n)-1] is written to
//sumArr[blockIdx.x]. Launched with a single thread per block.
__global__ void sum(int *arr, int *sumArr, int n){
    int base = blockIdx.x * 256;
    int limit = min(base + 256, n);     //clamp the slice to the array length
    int partial = 0;
    for(int idx = base; idx < limit; ++idx)
        partial += arr[idx];
    sumArr[blockIdx.x] = partial;
}
//Block-level partial minimum: writes the smallest value of the block's
//256-element slice of arr (arr[blockIdx.x*256 .. min(+256, n)-1]) to
//minArr[blockIdx.x]. Launched with a single thread per block.
//Fix: seed with INT_MAX instead of the magic constant 9999999, which
//silently returned a wrong minimum whenever every element in the slice
//exceeded 9999999.
__global__ void pMin(int *arr, int *minArr, int n){
    int start = blockIdx.x * 256;
    int minVal = INT_MAX;
    for(int i = start; i < min(start+256,n); i++){
        if(arr[i] < minVal){minVal = arr[i];}
    }
    minArr[blockIdx.x] = minVal;
}
//These 2 functions are for standard deviation
//Per-block partial sum of squared deviations: calcArr[blockIdx.x] receives
//sum((arr[i]-mean)^2) over the block's 256-element slice of arr. Dividing
//the grand total by n and taking the square root happens on the host.
__global__ void standardDeviation(float *arr, float* calcArr, float mean, int n){
    int base = blockIdx.x * 256;
    int limit = min(base + 256, n);     //clamp the slice to the array length
    float aggregate = 0;
    for(int i = base; i < limit; i++){
        float diff = arr[i] - mean;
        aggregate += diff * diff;
    }
    calcArr[blockIdx.x] = aggregate;
}
//Reduction helper for the standard-deviation pass: adds up the block's
//256-element slice of arr and stores the partial total in
//calcArr[blockIdx.x]. Launched with a single thread per block.
__global__ void addAll(float *arr, float *calcArr, int n){
    int base = blockIdx.x * 256;
    int limit = min(base + 256, n);     //clamp the slice to the array length
    float total = 0;
    for(int i = base; i < limit; i++)
        total += arr[i];
    calcArr[blockIdx.x] = total;
}
//Block-level partial maximum of a float slice, written to maxArr[blockIdx.x].
//Launched with a single thread per block.
//Fix: the running maximum was held in an int, so every kept candidate was
//truncated toward zero before comparison and storage, losing fractional
//values; track it as a float seeded with -FLT_MAX instead of the magic
//-9999999 sentinel.
__global__ void pMax(float *arr, float *maxArr, int n){
    int start = blockIdx.x * 256;
    float maxm = -FLT_MAX;
    for(int i = start; i < min(start+256,n); i++){
        if(arr[i] > maxm){
            maxm = arr[i];
        }
    }
    maxArr[blockIdx.x] = maxm;
}
// Driver: reads n, fills hostArr with 1..n, then runs four GPU reductions
// (sum, min, standard deviation, max), each as repeated 256:1 block-level
// passes until a single value remains, printing results alongside serial
// CPU checks.
// NOTE(review): none of the allocations (new[] buffers, hipMalloc'd device
// buffers) are ever freed — acceptable for a one-shot demo, a leak if reused.
int main(){
    int n;
    cout << "Enter number of elements: ";
    cin >> n;
    int *hostArr,*devArr,*devSumArr, *devMinArr;
    hostArr = new int[n];
    for(int i = 0; i < n; i++){
        hostArr[i] = i+1;                   // data set is simply 1..n
    }
    hipMalloc(&devArr, n * 4);              // 4 == sizeof(int)
    int blocks = ceil(n * 1.0f/256.0f);     // one block per 256 elements
    hipMalloc(&devSumArr, blocks * 4);
    hipMemcpy(devArr, hostArr, n*4, hipMemcpyHostToDevice);
    //Sum
    int curr = n;                           // elements still to be reduced
    auto start = high_resolution_clock::now();
    while(curr > 1){
        sum<<<blocks, 1>>>(devArr, devSumArr, curr);
        // partial sums become the next round's input
        hipMemcpy(devArr,devSumArr,blocks*4,hipMemcpyDeviceToDevice);
        curr = ceil(curr* 1.0f/256.0f);
        blocks = ceil(curr*1.0f/256.0f);
    }
    auto stop = high_resolution_clock::now();
    cout << "parallel: " << duration_cast<microseconds>(stop - start).count() << endl;
    start = high_resolution_clock::now();
    long myVal = 0;                         // serial CPU reference for the sum
    for(int i = 0; i < n; i++){
        myVal = myVal + hostArr[i];
    }
    stop = high_resolution_clock::now();
    cout << "serial: " << duration_cast<microseconds>(stop - start).count() << endl;
    int sum;
    // the final reduced sum sits at devArr[0]
    hipMemcpy(&sum,devArr,4,hipMemcpyDeviceToHost);
    cout << "Sum: " << sum << endl;
    //Min value i
    hipMalloc(&devMinArr, blocks * 4);
    //Refill device array with values of host array
    hipMemcpy(devArr,hostArr,n*4,hipMemcpyHostToDevice);
    curr = n;
    blocks = ceil(n * 1.0f/256.0f);
    while(curr > 1){
        pMin<<<blocks, 1>>>(devArr, devMinArr, curr);
        hipMemcpy(devArr, devMinArr, blocks*4, hipMemcpyDeviceToDevice);
        curr = blocks;                      // one partial result per block
        blocks = ceil(curr*1.0f/256.0f);
    }
    int minVal;
    hipMemcpy(&minVal, devArr, 4, hipMemcpyDeviceToHost);
    cout << "min val: " << minVal << endl;
    float mean = (float)sum/n;              // mean feeds the deviation kernel
    //Standard deviation
    float *fDevArr, *fStdSum;
    blocks = ceil(n * 1.0f/ 256.0f);
    //Create an aggregate array
    hipMalloc(&fStdSum, sizeof(float) * blocks);
    //Copy mean's value to gpu mean
    float *floatHost = new float[n];        // float copy of the data for the fp kernels
    for(int i = 0; i < n; i++){
        floatHost[i] = (float)hostArr[i];
    }
    //Allocate device array space in gpu
    hipMalloc(&fDevArr, sizeof(float) * n);
    //Refill device array with values of host array
    hipMemcpy(fDevArr,floatHost, 4 * n, hipMemcpyHostToDevice);
    standardDeviation<<<blocks, 1>>>(fDevArr, fStdSum, mean, n);
    float *myArr = new float[blocks];
    // host-side validation: add the per-block partials serially
    hipMemcpy(myArr, fStdSum, sizeof(float) * blocks, hipMemcpyDeviceToHost);
    float total = 0;
    for(int i = 0; i < blocks; i++){
        total += myArr[i];
    }
    total /= n;
    total = sqrt(total);                    // population std-dev (divides by n, not n-1)
    cout << "validation standard deviation: " << total << endl;
    curr = blocks;
    while(curr > 1){
        // feed the previous round's partials back in as input
        hipMemcpy(fDevArr, fStdSum, curr * sizeof(float), hipMemcpyDeviceToDevice);
        blocks = ceil(curr * 1.0f/256.0f);
        cout << "blocks for aggregation: " << blocks << endl;
        addAll<<<blocks, 1>>>(fDevArr,fStdSum, curr);
        curr = blocks;
    }
    float stdDeviation;
    hipMemcpy(&stdDeviation, fStdSum, sizeof(float), hipMemcpyDeviceToHost);
    stdDeviation /= n;
    stdDeviation = sqrt(stdDeviation);
    cout << "Standard deviation: " << stdDeviation << endl;
    float *intermediateMax;
    blocks = ceil(n * 1.0f/256.0f);
    hipMalloc(&intermediateMax, blocks * sizeof(float));
    hipMemcpy(fDevArr,floatHost, 4 * n, hipMemcpyHostToDevice);
    curr = n;
    while(curr > 1){
        pMax<<<blocks, 1>>>(fDevArr, intermediateMax, curr);
        hipMemcpy(fDevArr, intermediateMax, blocks*sizeof(float), hipMemcpyDeviceToDevice);
        // debug dump of this round's partial maxima
        float *tempArr = new float[blocks]; // NOTE(review): leaked every iteration
        hipMemcpy(tempArr, intermediateMax, blocks*sizeof(float), hipMemcpyDeviceToHost);
        cout << "Intermediate maximum values: ";
        for(int i = 0; i < blocks; i++){
            cout << tempArr[i] << " ";
        }
        cout << endl;
        curr = blocks;
        blocks = ceil(curr * 1.0f/256.0f);
    }
    float maxm = 0;
    hipMemcpy(&maxm, intermediateMax, sizeof(float), hipMemcpyDeviceToHost);
    cout << "Maximum: " << maxm << endl;
}
	.text
.file "parallelreduction.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
#------------------------------------------------------------------------
# Host launch stub for sum(int*, int*, int).  SysV AMD64: rdi=arr,
# rsi=sumArr, edx=n.  Spills the three kernel arguments, builds the
# argument-pointer array at 80(%rsp), pops the launch configuration pushed
# by the <<<>>> expansion via __hipPopCallConfiguration, then calls
# hipLaunchKernel on the device symbol _Z3sumPiS_i.
#------------------------------------------------------------------------
	.globl	_Z18__device_stub__sumPiS_i     # -- Begin function _Z18__device_stub__sumPiS_i
	.p2align	4, 0x90
	.type	_Z18__device_stub__sumPiS_i,@function
_Z18__device_stub__sumPiS_i:            # @_Z18__device_stub__sumPiS_i
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 72(%rsp)                  # spill arr
	movq	%rsi, 64(%rsp)                  # spill sumArr
	movl	%edx, 12(%rsp)                  # spill n
	leaq	72(%rsp), %rax                  # args[0] = &arr
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax                  # args[1] = &sumArr
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax                  # args[2] = &n
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi                  # four out-params receive the
	leaq	32(%rsp), %rsi                  # launch config (grid, block,
	leaq	24(%rsp), %rdx                  # shared-mem bytes, stream)
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi                  # reload config into the
	movl	56(%rsp), %edx                  # hipLaunchKernel registers
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9                   # kernel-argument pointer array
	movl	$_Z3sumPiS_i, %edi              # kernel symbol
	pushq	16(%rsp)                        # remaining config forwarded on
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)                        # the stack
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end0:
	.size	_Z18__device_stub__sumPiS_i, .Lfunc_end0-_Z18__device_stub__sumPiS_i
	.cfi_endproc
                                        # -- End function
#------------------------------------------------------------------------
# Host launch stub for pMin(int*, int*, int).  Same scheme as the sum
# stub: spill rdi/rsi/edx, build the argument-pointer array at 80(%rsp),
# pop the launch config, then call hipLaunchKernel on _Z4pMinPiS_i.
#------------------------------------------------------------------------
	.globl	_Z19__device_stub__pMinPiS_i    # -- Begin function _Z19__device_stub__pMinPiS_i
	.p2align	4, 0x90
	.type	_Z19__device_stub__pMinPiS_i,@function
_Z19__device_stub__pMinPiS_i:           # @_Z19__device_stub__pMinPiS_i
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 72(%rsp)                  # spill arr
	movq	%rsi, 64(%rsp)                  # spill minArr
	movl	%edx, 12(%rsp)                  # spill n
	leaq	72(%rsp), %rax                  # args[] = {&arr, &minArr, &n}
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi                  # receive grid/block/shmem/stream
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9                   # kernel-argument pointer array
	movl	$_Z4pMinPiS_i, %edi             # kernel symbol
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end1:
	.size	_Z19__device_stub__pMinPiS_i, .Lfunc_end1-_Z19__device_stub__pMinPiS_i
	.cfi_endproc
                                        # -- End function
#------------------------------------------------------------------------
# Host launch stub for standardDeviation(float*, float*, float, int).
# SysV AMD64: rdi=arr, rsi=calcArr, xmm0=mean, edx=n.  Four arguments, so
# the pointer array at 80(%rsp) has four slots and the frame is 120 bytes.
# Pops the launch config, then calls hipLaunchKernel on
# _Z17standardDeviationPfS_fi.
#------------------------------------------------------------------------
	.globl	_Z32__device_stub__standardDeviationPfS_fi # -- Begin function _Z32__device_stub__standardDeviationPfS_fi
	.p2align	4, 0x90
	.type	_Z32__device_stub__standardDeviationPfS_fi,@function
_Z32__device_stub__standardDeviationPfS_fi: # @_Z32__device_stub__standardDeviationPfS_fi
	.cfi_startproc
# %bb.0:
	subq	$120, %rsp
	.cfi_def_cfa_offset 128
	movq	%rdi, 72(%rsp)                  # spill arr
	movq	%rsi, 64(%rsp)                  # spill calcArr
	movss	%xmm0, 12(%rsp)                 # spill mean (float)
	movl	%edx, 8(%rsp)                   # spill n
	leaq	72(%rsp), %rax                  # args[] = {&arr, &calcArr,
	movq	%rax, 80(%rsp)                  #           &mean, &n}
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	8(%rsp), %rax
	movq	%rax, 104(%rsp)
	leaq	48(%rsp), %rdi                  # receive grid/block/shmem/stream
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9                   # kernel-argument pointer array
	movl	$_Z17standardDeviationPfS_fi, %edi # kernel symbol
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$136, %rsp
	.cfi_adjust_cfa_offset -136
	retq
.Lfunc_end2:
	.size	_Z32__device_stub__standardDeviationPfS_fi, .Lfunc_end2-_Z32__device_stub__standardDeviationPfS_fi
	.cfi_endproc
                                        # -- End function
#------------------------------------------------------------------------
# Host launch stub for addAll(float*, float*, int).  Same scheme as the
# sum stub: spill rdi/rsi/edx, build the argument-pointer array at
# 80(%rsp), pop the launch config, then call hipLaunchKernel on
# _Z6addAllPfS_i.
#------------------------------------------------------------------------
	.globl	_Z21__device_stub__addAllPfS_i  # -- Begin function _Z21__device_stub__addAllPfS_i
	.p2align	4, 0x90
	.type	_Z21__device_stub__addAllPfS_i,@function
_Z21__device_stub__addAllPfS_i:         # @_Z21__device_stub__addAllPfS_i
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 72(%rsp)                  # spill arr
	movq	%rsi, 64(%rsp)                  # spill calcArr
	movl	%edx, 12(%rsp)                  # spill n
	leaq	72(%rsp), %rax                  # args[] = {&arr, &calcArr, &n}
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi                  # receive grid/block/shmem/stream
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9                   # kernel-argument pointer array
	movl	$_Z6addAllPfS_i, %edi           # kernel symbol
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end3:
	.size	_Z21__device_stub__addAllPfS_i, .Lfunc_end3-_Z21__device_stub__addAllPfS_i
	.cfi_endproc
                                        # -- End function
#------------------------------------------------------------------------
# Host launch stub for pMax(float*, float*, int).  Same scheme as the sum
# stub: spill rdi/rsi/edx, build the argument-pointer array at 80(%rsp),
# pop the launch config, then call hipLaunchKernel on _Z4pMaxPfS_i.
#------------------------------------------------------------------------
	.globl	_Z19__device_stub__pMaxPfS_i    # -- Begin function _Z19__device_stub__pMaxPfS_i
	.p2align	4, 0x90
	.type	_Z19__device_stub__pMaxPfS_i,@function
_Z19__device_stub__pMaxPfS_i:           # @_Z19__device_stub__pMaxPfS_i
	.cfi_startproc
# %bb.0:
	subq	$104, %rsp
	.cfi_def_cfa_offset 112
	movq	%rdi, 72(%rsp)                  # spill arr
	movq	%rsi, 64(%rsp)                  # spill maxArr
	movl	%edx, 12(%rsp)                  # spill n
	leaq	72(%rsp), %rax                  # args[] = {&arr, &maxArr, &n}
	movq	%rax, 80(%rsp)
	leaq	64(%rsp), %rax
	movq	%rax, 88(%rsp)
	leaq	12(%rsp), %rax
	movq	%rax, 96(%rsp)
	leaq	48(%rsp), %rdi                  # receive grid/block/shmem/stream
	leaq	32(%rsp), %rsi
	leaq	24(%rsp), %rdx
	leaq	16(%rsp), %rcx
	callq	__hipPopCallConfiguration
	movq	48(%rsp), %rsi
	movl	56(%rsp), %edx
	movq	32(%rsp), %rcx
	movl	40(%rsp), %r8d
	leaq	80(%rsp), %r9                   # kernel-argument pointer array
	movl	$_Z4pMaxPfS_i, %edi             # kernel symbol
	pushq	16(%rsp)
	.cfi_adjust_cfa_offset 8
	pushq	32(%rsp)
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$120, %rsp
	.cfi_adjust_cfa_offset -120
	retq
.Lfunc_end4:
	.size	_Z19__device_stub__pMaxPfS_i, .Lfunc_end4-_Z19__device_stub__pMaxPfS_i
	.cfi_endproc
                                        # -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI5_0:
.long 0x3b800000 # float 0.00390625
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $26, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
leaq 4(%rsp), %rsi
movl $_ZSt3cin, %edi
callq _ZNSirsERi
movslq 4(%rsp), %r14
leaq (,%r14,4), %rax
testq %r14, %r14
movq $-1, %rdi
cmovnsq %rax, %rdi
callq _Znam
movq %rax, %rbx
movl %r14d, %eax
testq %r14, %r14
jle .LBB5_3
# %bb.1: # %.lr.ph.preheader
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB5_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
leaq 1(%rcx), %rdx
movl %edx, (%rbx,%rcx,4)
movq %rdx, %rcx
cmpq %rdx, %rax
jne .LBB5_2
.LBB5_3: # %._crit_edge
movabsq $4294967296, %r14 # imm = 0x100000000
shll $2, %eax
movslq %eax, %rsi
leaq 88(%rsp), %rdi
callq hipMalloc
cvtsi2ssl 4(%rsp), %xmm0
mulss .LCPI5_0(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %r13d
leal (,%r13,4), %eax
movslq %eax, %rsi
leaq 184(%rsp), %rdi
callq hipMalloc
movq 88(%rsp), %rdi
movslq 4(%rsp), %rdx
shlq $2, %rdx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movl 4(%rsp), %ebp
callq _ZNSt6chrono3_V212system_clock3nowEv
cmpl $2, %ebp
movq %rax, 104(%rsp) # 8-byte Spill
jl .LBB5_8
# %bb.4:
leaq 1(%r14), %r15
leaq 16(%rsp), %r14
leaq 112(%rsp), %r12
jmp .LBB5_5
.p2align 4, 0x90
.LBB5_7: # in Loop: Header=BB5_5 Depth=1
movq 88(%rsp), %rdi
movq 184(%rsp), %rsi
shll $2, %r13d
movslq %r13d, %rdx
movl $3, %ecx
callq hipMemcpy
xorps %xmm0, %xmm0
cvtsi2ss %ebp, %xmm0
movss .LCPI5_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss %xmm1, %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %ebp
cvttps2dq %xmm0, %xmm0
cvtdq2ps %xmm0, %xmm0
mulss .LCPI5_0(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %r13d
cmpl $1, %ebp
jle .LBB5_8
.LBB5_5: # =>This Inner Loop Header: Depth=1
movl %r13d, %edi
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_7
# %bb.6: # in Loop: Header=BB5_5 Depth=1
movq 88(%rsp), %rax
movq 184(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movl %ebp, 168(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 168(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
movq %r14, %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movl $_Z3sumPiS_i, %edi
movq %r12, %r9
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB5_7
.LBB5_8: # %._crit_edge215
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, %r14
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $10, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
subq 104(%rsp), %r14 # 8-byte Folded Reload
movabsq $2361183241434822607, %r12 # imm = 0x20C49BA5E353F7CF
movq %r14, %rax
imulq %r12
movq %rdx, %rsi
shrq $63, %rsi
sarq $7, %rdx
addq %rdx, %rsi
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIlEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r14
testq %r14, %r14
je .LBB5_77
# %bb.9: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r14)
je .LBB5_11
# %bb.10:
movzbl 67(%r14), %ecx
jmp .LBB5_12
.LBB5_11:
movq %r14, %rdi
movq %rax, %r15
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r15, %rax
.LBB5_12: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, %r14
callq _ZNSt6chrono3_V212system_clock3nowEv
movq %rax, %r15
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $8, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
subq %r14, %r15
movq %r15, %rax
imulq %r12
movq %rdx, %rsi
shrq $63, %rsi
sarq $7, %rdx
addq %rdx, %rsi
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIlEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r14
testq %r14, %r14
je .LBB5_77
# %bb.13: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i136
cmpb $0, 56(%r14)
je .LBB5_15
# %bb.14:
movzbl 67(%r14), %ecx
jmp .LBB5_16
.LBB5_15:
movq %r14, %rdi
movq %rax, %r15
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r15, %rax
.LBB5_16: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit139
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq 88(%rsp), %rsi
leaq 164(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $5, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl 164(%rsp), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r14
testq %r14, %r14
je .LBB5_77
# %bb.17: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i141
cmpb $0, 56(%r14)
je .LBB5_19
# %bb.18:
movzbl 67(%r14), %ecx
jmp .LBB5_20
.LBB5_19:
movq %r14, %rdi
movq %rax, %r15
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r15, %rax
.LBB5_20: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit144
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
shll $2, %r13d
movslq %r13d, %rsi
leaq 168(%rsp), %rdi
callq hipMalloc
movq 88(%rsp), %rdi
movslq 4(%rsp), %rdx
shlq $2, %rdx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movl 4(%rsp), %r13d
cmpl $2, %r13d
jl .LBB5_25
# %bb.21: # %.lr.ph219
cvtsi2ss %r13d, %xmm0
mulss .LCPI5_0(%rip), %xmm0
callq ceilf@PLT
movabsq $4294967296, %rax # imm = 0x100000000
leaq 1(%rax), %r14
leaq 16(%rsp), %rbp
leaq 112(%rsp), %r15
jmp .LBB5_22
.p2align 4, 0x90
.LBB5_24: # in Loop: Header=BB5_22 Depth=1
movq 88(%rsp), %rdi
movq 168(%rsp), %rsi
leal (,%r12,4), %eax
movslq %eax, %rdx
movl $3, %ecx
callq hipMemcpy
xorps %xmm0, %xmm0
cvtsi2ss %r12d, %xmm0
mulss .LCPI5_0(%rip), %xmm0
callq ceilf@PLT
movl %r12d, %r13d
cmpl $1, %r12d
jle .LBB5_25
.LBB5_22: # =>This Inner Loop Header: Depth=1
cvttss2si %xmm0, %r12d
movq %r12, %rdi
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_24
# %bb.23: # in Loop: Header=BB5_22 Depth=1
movq 88(%rsp), %rax
movq 168(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r13d, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 8(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
movq %rbp, %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movl $_Z4pMinPiS_i, %edi
movq %r15, %r9
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB5_24
.LBB5_25: # %._crit_edge220
movq 88(%rsp), %rsi
leaq 180(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl $_ZSt4cout, %edi
movl $.L.str.4, %esi
movl $9, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl 180(%rsp), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r14
testq %r14, %r14
je .LBB5_77
# %bb.26: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i146
cmpb $0, 56(%r14)
je .LBB5_28
# %bb.27:
movzbl 67(%r14), %ecx
jmp .LBB5_29
.LBB5_28:
movq %r14, %rdi
movq %rax, %r15
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r15, %rax
.LBB5_29: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit149
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorps %xmm0, %xmm0
cvtsi2ssl 164(%rsp), %xmm0
movss %xmm0, 104(%rsp) # 4-byte Spill
cvtsi2ssl 4(%rsp), %xmm1
movss .LCPI5_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss %xmm1, 176(%rsp) # 4-byte Spill
mulss %xmm1, %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %r14d
movslq %r14d, %r15
shlq $2, %r15
leaq 152(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
movslq 4(%rsp), %r13
leaq (,%r13,4), %r12
testq %r13, %r13
movq $-1, %rdi
cmovnsq %r12, %rdi
callq _Znam
testq %r13, %r13
jle .LBB5_32
# %bb.30: # %.lr.ph222.preheader
movl %r13d, %edx
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB5_31: # %.lr.ph222
# =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ssl (%rbx,%rcx,4), %xmm0
movss %xmm0, (%rax,%rcx,4)
incq %rcx
cmpq %rcx, %rdx
jne .LBB5_31
.LBB5_32: # %._crit_edge223
leaq 8(%rsp), %rdi
movq %rax, %rbx
movq %r12, %rsi
callq hipMalloc
movq 8(%rsp), %rdi
movslq 4(%rsp), %rdx
shlq $2, %rdx
movq %rbx, 192(%rsp) # 8-byte Spill
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movl %r14d, %r13d
movq %r13, %rdi
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
leaq 1(%rax), %rbx
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_34
# %bb.33:
movss 104(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
divss 176(%rsp), %xmm0 # 4-byte Folded Reload
movq 8(%rsp), %rax
movq 152(%rsp), %rcx
movl 4(%rsp), %edx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movss %xmm0, 80(%rsp)
movl %edx, 100(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 100(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z17standardDeviationPfS_fi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_34:
testl %r14d, %r14d
movq $-1, %rdi
cmovnsq %r15, %rdi
callq _Znam
movq %rax, %r12
movq 152(%rsp), %rsi
movq %rax, %rdi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
xorps %xmm1, %xmm1
xorps %xmm0, %xmm0
testl %r14d, %r14d
jle .LBB5_37
# %bb.35: # %.lr.ph227.preheader
xorl %eax, %eax
.p2align 4, 0x90
.LBB5_36: # %.lr.ph227
# =>This Inner Loop Header: Depth=1
addss (%r12,%rax,4), %xmm0
incq %rax
cmpq %rax, %r13
jne .LBB5_36
.LBB5_37: # %._crit_edge228
cvtsi2ssl 4(%rsp), %xmm2
divss %xmm2, %xmm0
ucomiss %xmm1, %xmm0
jb .LBB5_39
# %bb.38:
sqrtss %xmm0, %xmm0
jmp .LBB5_40
.LBB5_39: # %call.sqrt
callq sqrtf
.LBB5_40: # %._crit_edge228.split
movss %xmm0, 104(%rsp) # 4-byte Spill
movl $_ZSt4cout, %edi
movl $.L.str.5, %esi
movl $31, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 104(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r15
testq %r15, %r15
je .LBB5_77
# %bb.41: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i151
cmpb $0, 56(%r15)
je .LBB5_43
# %bb.42:
movzbl 67(%r15), %ecx
jmp .LBB5_44
.LBB5_43:
movq %r15, %rdi
movq %rax, %r12
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r15), %rax
movq %r15, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r12, %rax
.LBB5_44: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit154
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
cmpl $2, %r14d
jl .LBB5_53
# %bb.45:
leaq 112(%rsp), %r15
jmp .LBB5_46
.p2align 4, 0x90
.LBB5_52: # in Loop: Header=BB5_46 Depth=1
movl %r12d, %r14d
cmpl $1, %r12d
jle .LBB5_53
.LBB5_46: # =>This Inner Loop Header: Depth=1
movq 8(%rsp), %rdi
movq 152(%rsp), %rsi
movl %r14d, %edx
shlq $2, %rdx
movl $3, %ecx
callq hipMemcpy
xorps %xmm0, %xmm0
cvtsi2ss %r14d, %xmm0
mulss .LCPI5_0(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %r12d
movl $_ZSt4cout, %edi
movl $.L.str.6, %esi
movl $24, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %r12d, %esi
callq _ZNSolsEi
movq %rax, %r13
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%r13,%rax), %rbp
testq %rbp, %rbp
je .LBB5_77
# %bb.47: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i156
# in Loop: Header=BB5_46 Depth=1
cmpb $0, 56(%rbp)
je .LBB5_49
# %bb.48: # in Loop: Header=BB5_46 Depth=1
movzbl 67(%rbp), %eax
jmp .LBB5_50
.p2align 4, 0x90
.LBB5_49: # in Loop: Header=BB5_46 Depth=1
movq %rbp, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbp), %rax
movq %rbp, %rdi
movl $10, %esi
callq *48(%rax)
.LBB5_50: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit159
# in Loop: Header=BB5_46 Depth=1
movsbl %al, %esi
movq %r13, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl %r12d, %edi
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_52
# %bb.51: # in Loop: Header=BB5_46 Depth=1
movq 8(%rsp), %rax
movq 152(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r14d, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movl $_Z6addAllPfS_i, %edi
movq %r15, %r9
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB5_52
.LBB5_53: # %._crit_edge232
movq 152(%rsp), %rsi
leaq 96(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
xorps %xmm1, %xmm1
cvtsi2ssl 4(%rsp), %xmm1
movss 96(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
divss %xmm1, %xmm0
xorps %xmm1, %xmm1
ucomiss %xmm1, %xmm0
jb .LBB5_55
# %bb.54:
sqrtss %xmm0, %xmm0
jmp .LBB5_56
.LBB5_55: # %call.sqrt269
callq sqrtf
.LBB5_56: # %._crit_edge232.split
movss %xmm0, 96(%rsp)
movl $_ZSt4cout, %edi
movl $.L.str.7, %esi
movl $20, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 96(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r15
testq %r15, %r15
je .LBB5_77
# %bb.57: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i161
cmpb $0, 56(%r15)
je .LBB5_59
# %bb.58:
movzbl 67(%r15), %ecx
jmp .LBB5_60
.LBB5_59:
movq %r15, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r15), %rax
movq %r15, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB5_60: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit164
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorps %xmm0, %xmm0
cvtsi2ssl 4(%rsp), %xmm0
mulss .LCPI5_0(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %ebp
movslq %ebp, %rsi
shlq $2, %rsi
leaq 80(%rsp), %rdi
callq hipMalloc
movq 8(%rsp), %rdi
movslq 4(%rsp), %rdx
shlq $2, %rdx
movq 192(%rsp), %rsi # 8-byte Reload
movl $1, %ecx
callq hipMemcpy
movl 4(%rsp), %r14d
cmpl $2, %r14d
jl .LBB5_72
# %bb.61: # %.lr.ph239
movq $-1, %r12
jmp .LBB5_62
.p2align 4, 0x90
.LBB5_70: # in Loop: Header=BB5_62 Depth=1
movq %r14, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
.LBB5_71: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit169
# in Loop: Header=BB5_62 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorps %xmm0, %xmm0
cvtsi2ss %r13d, %xmm0
mulss .LCPI5_0(%rip), %xmm0
callq ceilf@PLT
cvttss2si %xmm0, %ebp
movl %r13d, %r14d
cmpl $1, %r13d
jle .LBB5_72
.LBB5_62: # =>This Loop Header: Depth=1
# Child Loop BB5_66 Depth 2
movl %ebp, %r13d
movl %ebp, %ebp
movq %rbp, %rdi
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_64
# %bb.63: # in Loop: Header=BB5_62 Depth=1
movq 8(%rsp), %rax
movq 80(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r14d, 100(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 100(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movl $_Z4pMaxPfS_i, %edi
leaq 112(%rsp), %r9
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_64: # in Loop: Header=BB5_62 Depth=1
movq 8(%rsp), %rdi
movq 80(%rsp), %rsi
movslq %r13d, %r15
shlq $2, %r15
movq %r15, %rdx
movl $3, %ecx
callq hipMemcpy
testl %r13d, %r13d
movq %r15, %rdi
cmovsq %r12, %rdi
callq _Znam
movq %rax, %r14
movq 80(%rsp), %rsi
movq %rax, %rdi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
movl $_ZSt4cout, %edi
movl $.L.str.8, %esi
movl $29, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
testl %r13d, %r13d
jle .LBB5_67
# %bb.65: # %.lr.ph235.preheader
# in Loop: Header=BB5_62 Depth=1
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB5_66: # %.lr.ph235
# Parent Loop BB5_62 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movl $.L.str.9, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r15
cmpq %r15, %rbp
jne .LBB5_66
.LBB5_67: # %._crit_edge236
# in Loop: Header=BB5_62 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r14
testq %r14, %r14
je .LBB5_77
# %bb.68: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i166
# in Loop: Header=BB5_62 Depth=1
cmpb $0, 56(%r14)
je .LBB5_70
# %bb.69: # in Loop: Header=BB5_62 Depth=1
movzbl 67(%r14), %eax
jmp .LBB5_71
.LBB5_72: # %._crit_edge240
movl $0, 112(%rsp)
movq 80(%rsp), %rsi
leaq 112(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl $_ZSt4cout, %edi
movl $.L.str.10, %esi
movl $9, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 112(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB5_77
# %bb.73: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i171
cmpb $0, 56(%rbx)
je .LBB5_75
# %bb.74:
movzbl 67(%rbx), %ecx
jmp .LBB5_76
.LBB5_75:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB5_76: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit174
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %eax, %eax
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB5_77:
.cfi_def_cfa_offset 256
callq _ZSt16__throw_bad_castv
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
# Module constructor (run from .init_array before main): registers the embedded
# HIP fat binary exactly once, registers each kernel's host stub with the HIP
# runtime, then installs __hip_module_dtor via atexit (tail call).
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	pushq	%rbx                            # save callee-saved rbx; also realigns rsp to 16
	.cfi_def_cfa_offset 16
	subq	$32, %rsp                       # room for the 4 stack args of __hipRegisterFunction
	.cfi_def_cfa_offset 48
	.cfi_offset %rbx, -16
	cmpq	$0, __hip_gpubin_handle(%rip)   # fat binary already registered?
	jne	.LBB6_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip) # cache the returned handle
.LBB6_2:
	movq	__hip_gpubin_handle(%rip), %rbx # rbx = module handle, reused for all 5 kernels
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)                 # zero the four trailing pointer args on the stack
	movups	%xmm0, (%rsp)
	movl	$_Z3sumPiS_i, %esi              # host-side handle for sum(int*, int*, int)
	movl	$.L__unnamed_1, %edx            # device-side kernel name string
	movl	$.L__unnamed_1, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d                       # no thread limit
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z4pMinPiS_i, %esi             # pMin(int*, int*, int)
	movl	$.L__unnamed_2, %edx
	movl	$.L__unnamed_2, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z17standardDeviationPfS_fi, %esi # standardDeviation(float*, float*, float, int)
	movl	$.L__unnamed_3, %edx
	movl	$.L__unnamed_3, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z6addAllPfS_i, %esi           # addAll(float*, float*, int)
	movl	$.L__unnamed_4, %edx
	movl	$.L__unnamed_4, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z4pMaxPfS_i, %esi             # pMax(float*, float*, int)
	movl	$.L__unnamed_5, %edx
	movl	$.L__unnamed_5, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	popq	%rbx
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL — schedule __hip_module_dtor at process exit
.Lfunc_end6:
	.size	__hip_module_ctor, .Lfunc_end6-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
# Module destructor (registered with atexit by the ctor): unregisters the fat
# binary if one was registered and clears the cached handle, so it is
# idempotent if invoked more than once.
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi                      # nothing to do if never registered
	je	.LBB7_2
# %bb.1:
	pushq	%rax                            # dummy 8-byte push keeps rsp 16-aligned at the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)   # mark as unregistered
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB7_2:
	retq
.Lfunc_end7:
	.size	__hip_module_dtor, .Lfunc_end7-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
# Host-side kernel handles: each is an 8-byte .rodata object holding the
# address of that kernel's host launch stub.  These symbols are the values
# passed to hipLaunchKernel and to __hipRegisterFunction in the ctor above.
	.type	_Z3sumPiS_i,@object             # @_Z3sumPiS_i
	.section	.rodata,"a",@progbits
	.globl	_Z3sumPiS_i
	.p2align	3, 0x0
_Z3sumPiS_i:
	.quad	_Z18__device_stub__sumPiS_i
	.size	_Z3sumPiS_i, 8
	.type	_Z4pMinPiS_i,@object            # @_Z4pMinPiS_i
	.globl	_Z4pMinPiS_i
	.p2align	3, 0x0
_Z4pMinPiS_i:
	.quad	_Z19__device_stub__pMinPiS_i
	.size	_Z4pMinPiS_i, 8
	.type	_Z17standardDeviationPfS_fi,@object # @_Z17standardDeviationPfS_fi
	.globl	_Z17standardDeviationPfS_fi
	.p2align	3, 0x0
_Z17standardDeviationPfS_fi:
	.quad	_Z32__device_stub__standardDeviationPfS_fi
	.size	_Z17standardDeviationPfS_fi, 8
	.type	_Z6addAllPfS_i,@object          # @_Z6addAllPfS_i
	.globl	_Z6addAllPfS_i
	.p2align	3, 0x0
_Z6addAllPfS_i:
	.quad	_Z21__device_stub__addAllPfS_i
	.size	_Z6addAllPfS_i, 8
	.type	_Z4pMaxPfS_i,@object            # @_Z4pMaxPfS_i
	.globl	_Z4pMaxPfS_i
	.p2align	3, 0x0
_Z4pMaxPfS_i:
	.quad	_Z19__device_stub__pMaxPfS_i
	.size	_Z4pMaxPfS_i, 8
# User-facing message literals referenced by main() (prompts, labels, the
# single-space separator).  String bytes are runtime data — do not edit.
	.type	.L.str,@object                  # @.str
	.section	.rodata.str1.1,"aMS",@progbits,1
.L.str:
	.asciz	"Enter number of elements: "
	.size	.L.str, 27
	.type	.L.str.1,@object                # @.str.1
.L.str.1:
	.asciz	"parallel: "
	.size	.L.str.1, 11
	.type	.L.str.2,@object                # @.str.2
.L.str.2:
	.asciz	"serial: "
	.size	.L.str.2, 9
	.type	.L.str.3,@object                # @.str.3
.L.str.3:
	.asciz	"Sum: "
	.size	.L.str.3, 6
	.type	.L.str.4,@object                # @.str.4
.L.str.4:
	.asciz	"min val: "
	.size	.L.str.4, 10
	.type	.L.str.5,@object                # @.str.5
.L.str.5:
	.asciz	"validation standard deviation: "
	.size	.L.str.5, 32
	.type	.L.str.6,@object                # @.str.6
.L.str.6:
	.asciz	"blocks for aggregation: "
	.size	.L.str.6, 25
	.type	.L.str.7,@object                # @.str.7
.L.str.7:
	.asciz	"Standard deviation: "
	.size	.L.str.7, 21
	.type	.L.str.8,@object                # @.str.8
.L.str.8:
	.asciz	"Intermediate maximum values: "
	.size	.L.str.8, 30
	.type	.L.str.9,@object                # @.str.9
.L.str.9:
	.asciz	" "
	.size	.L.str.9, 2
	.type	.L.str.10,@object               # @.str.10
.L.str.10:
	.asciz	"Maximum: "
	.size	.L.str.10, 10
# Mangled kernel-name strings: the lookup keys passed to __hipRegisterFunction
# so the runtime can match each host stub to its device code in the fat binary.
	.type	.L__unnamed_1,@object           # @0
.L__unnamed_1:
	.asciz	"_Z3sumPiS_i"
	.size	.L__unnamed_1, 12
	.type	.L__unnamed_2,@object           # @1
.L__unnamed_2:
	.asciz	"_Z4pMinPiS_i"
	.size	.L__unnamed_2, 13
	.type	.L__unnamed_3,@object           # @2
.L__unnamed_3:
	.asciz	"_Z17standardDeviationPfS_fi"
	.size	.L__unnamed_3, 28
	.type	.L__unnamed_4,@object           # @3
.L__unnamed_4:
	.asciz	"_Z6addAllPfS_i"
	.size	.L__unnamed_4, 15
	.type	.L__unnamed_5,@object           # @4
.L__unnamed_5:
	.asciz	"_Z4pMaxPfS_i"
	.size	.L__unnamed_5, 13
# Fat-binary descriptor handed to __hipRegisterFatBinary:
# magic 0x48495046, version 1, pointer to the embedded __hip_fatbin blob.
	.type	__hip_fatbin_wrapper,@object    # @__hip_fatbin_wrapper
	.section	.hipFatBinSegment,"a",@progbits
	.p2align	3, 0x0
__hip_fatbin_wrapper:
	.long	1212764230                      # 0x48495046 — wrapper magic
	.long	1                               # 0x1 — version
	.quad	__hip_fatbin
	.quad	0
	.size	__hip_fatbin_wrapper, 24
# Cached handle returned by __hipRegisterFatBinary (0 = not yet registered).
	.type	__hip_gpubin_handle,@object     # @__hip_gpubin_handle
	.local	__hip_gpubin_handle
	.comm	__hip_gpubin_handle,8,8
# Run __hip_module_ctor automatically at program startup.
	.section	.init_array,"aw",@init_array
	.p2align	3, 0x0
	.quad	__hip_module_ctor
	.type	__hip_cuid_,@object             # @__hip_cuid_
	.bss
	.globl	__hip_cuid_
__hip_cuid_:
	.byte	0                               # 0x0
	.size	__hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__sumPiS_i
.addrsig_sym _Z19__device_stub__pMinPiS_i
.addrsig_sym _Z32__device_stub__standardDeviationPfS_fi
.addrsig_sym _Z21__device_stub__addAllPfS_i
.addrsig_sym _Z19__device_stub__pMaxPfS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3sumPiS_i
.addrsig_sym _Z4pMinPiS_i
.addrsig_sym _Z17standardDeviationPfS_fi
.addrsig_sym _Z6addAllPfS_i
.addrsig_sym _Z4pMaxPfS_i
.addrsig_sym _ZSt4cout
.addrsig_sym _ZSt3cin
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
static const int VECTOR_SIZE = 32;	// number of elements to scan
static const int WORK_SIZE = 16;	// threads per block in the kernel launch
/**
 * This macro checks return value of the CUDA runtime call and exits
 * the application if the call failed.
 *
 * NOTE(review): the body is a bare { } block rather than do { } while (0),
 * so using the macro as the sole statement of an if/else can mis-parse.
 */
#define CUDA_CHECK_RETURN(value) { \
	cudaError_t _m_cudaStat = value; \
	if (_m_cudaStat != cudaSuccess) { \
		fprintf(stderr, "Error %s at line %d in file %s\n", \
				cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
		exit(1); \
	} }
// In-place inclusive prefix sum (Hillis–Steele style) over one 32-lane warp.
// Each step adds the value 1, 2, 4, 8, then 16 lanes to the left whenever
// such a lane exists inside the warp.  Returns this thread's result, ptr[idx].
//
// NOTE(review): there is no synchronization between steps — this is classic
// warp-synchronous code, and is a data race under independent thread
// scheduling (Volta and newer); confirm the target architecture or add
// __syncwarp()/volatile accesses.
__device__ int scan_warp(int *ptr, const unsigned int idx = threadIdx.x) {
	const unsigned int aux = idx & 31;	// lane id (idx mod 32)
	if(aux >= 1) ptr[idx] = ptr[idx - 1] + ptr[idx];
	if(aux >= 2) ptr[idx] = ptr[idx - 2] + ptr[idx];
	if(aux >= 4) ptr[idx] = ptr[idx - 4] + ptr[idx];
	if(aux >= 8) ptr[idx] = ptr[idx - 8] + ptr[idx];
	if(aux >= 16) ptr[idx] = ptr[idx - 16] + ptr[idx];
	return ptr[idx];
}
// Block-wide in-place inclusive scan built from warp scans:
//  1) every warp scans its own 32-element slice of ptr,
//  2) lane 31 of each warp stores that warp's total into shared temp[],
//  3) warp 0 scans the warp totals,
//  4) each thread of warp w (w > 0) adds the combined total of warps 0..w-1.
// Returns this thread's block-wide inclusive-scan value.
// temp[32] holds at most 32 warp totals, i.e. blocks of up to 1024 threads.
__device__ int scan_block(int *ptr, const unsigned int idx = threadIdx.x) {
	const unsigned int aux = idx & 31;	// lane id within the warp
	const unsigned int warp_id = idx >> 5;	// warp index within the block
	__shared__ int temp[32];	// per-warp totals
	int val = scan_warp(ptr, idx);
	__syncthreads();
	if(aux == 31) temp[warp_id] = ptr[idx];	// last lane carries the warp's total
	__syncthreads();
	if(warp_id == 0) scan_warp(temp, idx);	// scan the warp totals
	__syncthreads();
	if(warp_id > 0) val = temp[warp_id - 1] + val;	// add preceding warps' sum
	__syncthreads();
	ptr[idx] = val;
	__syncthreads();
	return val;
}
// Kernel entry: attempts a grid-wide inclusive scan by combining per-block
// scans through the dynamically allocated shared array temp[].
//
// NOTE(review): several suspected defects, flagged here rather than changed:
//  - `aux = idx & blockDim.x`: since idx < blockDim.x, (idx & blockDim.x)
//    can never equal blockDim.x, so `aux == blockDim.x` is unsatisfiable,
//    temp[bi] is never written, and temp[bi - 1] below reads uninitialized
//    shared memory.  The last-thread test was probably meant to be
//    `idx == blockDim.x - 1`.
//  - ptr is indexed by threadIdx.x only, so every block reads/writes the
//    same first blockDim.x elements instead of its own slice.
//  - temp is per-block shared memory and __syncthreads() is block-scoped;
//    neither can carry totals between blocks as this code appears to intend.
__global__ void scan_global(int *ptr) {
	const unsigned int idx = threadIdx.x;
	const unsigned int aux = idx & blockDim.x;
	const unsigned int bi = blockIdx.x;
	extern __shared__ int temp[];	// dynamic shared memory (WORK_SIZE ints at launch)
	int val = scan_block(ptr, idx);
	__syncthreads();
	if(aux == blockDim.x) temp[bi] = ptr[idx];	// never true — see NOTE above
	__syncthreads();
	if(bi == 0) scan_block(temp, idx);
	__syncthreads();
	if(bi > 0) val = temp[bi - 1] + val;
	__syncthreads();
	ptr[idx] = val;
	__syncthreads();
	return;
}
/**
 * Host function that prepares data array and passes it to the CUDA kernel.
 *
 * Fills idata with 0..VECTOR_SIZE-1, runs scan_global over it on the device,
 * copies the result back, and compares it against a serially computed
 * inclusive prefix sum, printing each input/output pair ("Erro !!!" on
 * mismatch).  Returns 0; any CUDA failure exits via CUDA_CHECK_RETURN.
 */
int main(void) {
	int *d = NULL;	// device buffer
	int i;
	unsigned int GRID_SIZE = (1 + VECTOR_SIZE) / WORK_SIZE;	// (1 + 32) / 16 == 2 blocks
	unsigned int idata[VECTOR_SIZE], odata[VECTOR_SIZE];
	for (i = 0; i < VECTOR_SIZE; i++)
		idata[i] = (unsigned int) i;
	CUDA_CHECK_RETURN(cudaMalloc((void**) &d, sizeof(int) * VECTOR_SIZE));
	CUDA_CHECK_RETURN(
			cudaMemcpy(d, idata, sizeof(int) * VECTOR_SIZE, cudaMemcpyHostToDevice));
	// Launch with WORK_SIZE * sizeof(int) bytes of dynamic shared memory per block.
	scan_global<<<GRID_SIZE, WORK_SIZE, WORK_SIZE * sizeof(int)>>>(d);
	CUDA_CHECK_RETURN(cudaDeviceSynchronize()); // Wait for the GPU launched work to complete
	CUDA_CHECK_RETURN(cudaGetLastError());
	CUDA_CHECK_RETURN(cudaMemcpy(odata, d, sizeof(int) * VECTOR_SIZE, cudaMemcpyDeviceToHost));
	// sum tracks the expected inclusive prefix sum of idata at position i.
	unsigned int sum = idata[0];
	for (i = 0; i < VECTOR_SIZE - 1; i++) {
		if(odata[i] != sum) {
			printf("Erro !!!");
		}
		// NOTE(review): this guard is always true here since i < VECTOR_SIZE - 1.
		if((i + 1) < VECTOR_SIZE) {
			sum += idata[i+1];
		}
		printf("Input value: %u, device output: %u\n", idata[i], odata[i]);
	}
	// i == VECTOR_SIZE - 1 after the loop, so this checks/prints the final pair.
	if(odata[VECTOR_SIZE - 1] != sum) {
		printf("Erro !!!");
	}
	printf("Input value: %u, device output: %u\n", idata[i], odata[VECTOR_SIZE - 1]);
	CUDA_CHECK_RETURN(cudaFree((void*) d));
	CUDA_CHECK_RETURN(cudaDeviceReset());
	return 0;
} | code for sm_80
Function : _Z11scan_globalPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0020*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0040*/ LOP3.LUT P5, R10, R0.reuse, 0x1f, RZ, 0xc0, !PT ; /* 0x0000001f000a7812 */
/* 0x041fe400078ac0ff */
/*0050*/ IMAD.WIDE.U32 R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fd600078e0005 */
/*0060*/ @P5 IADD3 R6, R0, -0x1, RZ ; /* 0xffffffff00065810 */
/* 0x000fe20007ffe0ff */
/*0070*/ @P5 LDG.E R4, [R2.64] ; /* 0x0000000602045981 */
/* 0x000ea8000c1e1900 */
/*0080*/ @P5 IMAD.WIDE.U32 R6, R6, R5, c[0x0][0x160] ; /* 0x0000580006065625 */
/* 0x000fcc00078e0005 */
/*0090*/ @P5 LDG.E R7, [R6.64] ; /* 0x0000000606075981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ ISETP.GE.U32.AND P4, PT, R10, 0x2, PT ; /* 0x000000020a00780c */
/* 0x000fda0003f86070 */
/*00b0*/ @P4 IADD3 R8, R0, -0x2, RZ ; /* 0xfffffffe00084810 */
/* 0x000fca0007ffe0ff */
/*00c0*/ @P4 IMAD.WIDE.U32 R8, R8, R5, c[0x0][0x160] ; /* 0x0000580008084625 */
/* 0x000fc800078e0005 */
/*00d0*/ @P5 IMAD.IADD R11, R4, 0x1, R7 ; /* 0x00000001040b5824 */
/* 0x004fca00078e0207 */
/*00e0*/ @P5 STG.E [R2.64], R11 ; /* 0x0000000b02005986 */
/* 0x0001e8000c101906 */
/*00f0*/ @P4 LDG.E R8, [R8.64] ; /* 0x0000000608084981 */
/* 0x000ea8000c1e1900 */
/*0100*/ @P4 LDG.E R13, [R2.64] ; /* 0x00000006020d4981 */
/* 0x000ea2000c1e1900 */
/*0110*/ ISETP.GE.U32.AND P3, PT, R10, 0x4, PT ; /* 0x000000040a00780c */
/* 0x000fda0003f66070 */
/*0120*/ @P3 IADD3 R4, R0, -0x4, RZ ; /* 0xfffffffc00043810 */
/* 0x000fca0007ffe0ff */
/*0130*/ @P3 IMAD.WIDE.U32 R6, R4, R5, c[0x0][0x160] ; /* 0x0000580004063625 */
/* 0x000fc800078e0005 */
/*0140*/ @P4 IMAD.IADD R13, R8, 0x1, R13 ; /* 0x00000001080d4824 */
/* 0x004fca00078e020d */
/*0150*/ @P4 STG.E [R2.64], R13 ; /* 0x0000000d02004986 */
/* 0x0003e8000c101906 */
/*0160*/ @P3 LDG.E R6, [R6.64] ; /* 0x0000000606063981 */
/* 0x000e28000c1e1900 */
/*0170*/ @P3 LDG.E R15, [R2.64] ; /* 0x00000006020f3981 */
/* 0x000e22000c1e1900 */
/*0180*/ ISETP.GE.U32.AND P2, PT, R10, 0x8, PT ; /* 0x000000080a00780c */
/* 0x000fda0003f46070 */
/*0190*/ @P2 IADD3 R4, R0, -0x8, RZ ; /* 0xfffffff800042810 */
/* 0x000fca0007ffe0ff */
/*01a0*/ @P2 IMAD.WIDE.U32 R8, R4, R5, c[0x0][0x160] ; /* 0x0000580004082625 */
/* 0x000fc800078e0005 */
/*01b0*/ @P3 IMAD.IADD R11, R6, 0x1, R15 ; /* 0x00000001060b3824 */
/* 0x001fca00078e020f */
/*01c0*/ @P3 STG.E [R2.64], R11 ; /* 0x0000000b02003986 */
/* 0x0001e8000c101906 */
/*01d0*/ @P2 LDG.E R8, [R8.64] ; /* 0x0000000608082981 */
/* 0x000e68000c1e1900 */
/*01e0*/ @P2 LDG.E R15, [R2.64] ; /* 0x00000006020f2981 */
/* 0x000e62000c1e1900 */
/*01f0*/ ISETP.GT.U32.AND P1, PT, R10, 0xf, PT ; /* 0x0000000f0a00780c */
/* 0x000fda0003f24070 */
/*0200*/ @P1 IADD3 R4, R0, -0x10, RZ ; /* 0xfffffff000041810 */
/* 0x000fca0007ffe0ff */
/*0210*/ @P1 IMAD.WIDE.U32 R6, R4, R5, c[0x0][0x160] ; /* 0x0000580004061625 */
/* 0x000fc800078e0005 */
/*0220*/ @P2 IMAD.IADD R13, R8, 0x1, R15 ; /* 0x00000001080d2824 */
/* 0x002fca00078e020f */
/*0230*/ @P2 STG.E [R2.64], R13 ; /* 0x0000000d02002986 */
/* 0x0001e8000c101906 */
/*0240*/ @P1 LDG.E R6, [R6.64] ; /* 0x0000000606061981 */
/* 0x000ea8000c1e1900 */
/*0250*/ @!P1 LDG.E R5, [R2.64] ; /* 0x0000000602059981 */
/* 0x000168000c1e1900 */
/*0260*/ @P1 LDG.E R15, [R2.64] ; /* 0x00000006020f1981 */
/* 0x000ea2000c1e1900 */
/*0270*/ ISETP.NE.AND P6, PT, R10, 0x1f, PT ; /* 0x0000001f0a00780c */
/* 0x000fe20003fc5270 */
/*0280*/ @P1 IMAD.IADD R5, R6, 0x1, R15 ; /* 0x0000000106051824 */
/* 0x004fca00078e020f */
/*0290*/ @P1 STG.E [R2.64], R5 ; /* 0x0000000502001986 */
/* 0x0001e8000c101906 */
/*02a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*02b0*/ @!P6 LDG.E R9, [R2.64] ; /* 0x000000060209e981 */
/* 0x000ea2000c1e1900 */
/*02c0*/ P2R R4, PR, RZ, 0x40 ; /* 0x00000040ff047803 */
/* 0x000fc40000000000 */
/*02d0*/ SHF.R.U32.HI R4, RZ, 0x5, R0 ; /* 0x00000005ff047819 */
/* 0x000fc80000011600 */
/*02e0*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003f05270 */
/*02f0*/ S2UR UR5, SR_CTAID.X ; /* 0x00000000000579c3 */
/* 0x000e620000002500 */
/*0300*/ BSSY B0, 0x4b0 ; /* 0x000001a000007945 */
/* 0x000fe40003800000 */
/*0310*/ P2R R6, PR, RZ, 0x1 ; /* 0x00000001ff067803 */
/* 0x000fe20000000000 */
/*0320*/ @!P6 STS [R4.X4], R9 ; /* 0x000000090400e388 */
/* 0x0041e80000004800 */
/*0330*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0340*/ @P0 BRA 0x4a0 ; /* 0x0000015000000947 */
/* 0x000fea0003800000 */
/*0350*/ @P5 LDS R6, [R0.X4] ; /* 0x0000000000065984 */
/* 0x003fe20000004800 */
/*0360*/ ISETP.GE.U32.AND P0, PT, R10, 0x10, PT ; /* 0x000000100a00780c */
/* 0x000fc60003f06070 */
/*0370*/ @P5 LDS R7, [R0.X4+-0x4] ; /* 0xfffffc0000075984 */
/* 0x000e240000004800 */
/*0380*/ @P5 IMAD.IADD R7, R6, 0x1, R7 ; /* 0x0000000106075824 */
/* 0x001fe400078e0207 */
/*0390*/ @P4 LDS R6, [R0.X4+-0x8] ; /* 0xfffff80000064984 */
/* 0x000fe80000004800 */
/*03a0*/ @P5 STS [R0.X4], R7 ; /* 0x0000000700005388 */
/* 0x000fe80000004800 */
/*03b0*/ @P4 LDS R9, [R0.X4] ; /* 0x0000000000094984 */
/* 0x000e240000004800 */
/*03c0*/ @P4 IADD3 R9, R6, R9, RZ ; /* 0x0000000906094210 */
/* 0x001fc40007ffe0ff */
/*03d0*/ @P3 LDS R6, [R0.X4+-0x10] ; /* 0xfffff00000063984 */
/* 0x000fe80000004800 */
/*03e0*/ @P4 STS [R0.X4], R9 ; /* 0x0000000900004388 */
/* 0x000fe80000004800 */
/*03f0*/ @P3 LDS R11, [R0.X4] ; /* 0x00000000000b3984 */
/* 0x000e240000004800 */
/*0400*/ @P3 IMAD.IADD R11, R6, 0x1, R11 ; /* 0x00000001060b3824 */
/* 0x001fc400078e020b */
/*0410*/ @P2 LDS R6, [R0.X4+-0x20] ; /* 0xffffe00000062984 */
/* 0x000fe80000004800 */
/*0420*/ @P3 STS [R0.X4], R11 ; /* 0x0000000b00003388 */
/* 0x000fe80000004800 */
/*0430*/ @P2 LDS R13, [R0.X4] ; /* 0x00000000000d2984 */
/* 0x000e240000004800 */
/*0440*/ @P2 IMAD.IADD R13, R6, 0x1, R13 ; /* 0x00000001060d2824 */
/* 0x001fc400078e020d */
/*0450*/ @P0 LDS R6, [R0.X4+-0x40] ; /* 0xffffc00000060984 */
/* 0x000fe80000004800 */
/*0460*/ @P2 STS [R0.X4], R13 ; /* 0x0000000d00002388 */
/* 0x000fe80000004800 */
/*0470*/ @P0 LDS R7, [R0.X4] ; /* 0x0000000000070984 */
/* 0x000e240000004800 */
/*0480*/ @P0 IMAD.IADD R7, R6, 0x1, R7 ; /* 0x0000000106070824 */
/* 0x001fca00078e0207 */
/*0490*/ @P0 STS [R0.X4], R7 ; /* 0x0000000700000388 */
/* 0x0001e40000004800 */
/*04a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x003fea0003800000 */
/*04b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*04c0*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003f05270 */
/*04d0*/ ULEA UR4, UR5, 0x80, 0x2 ; /* 0x0000008005047891 */
/* 0x000fc8000f8e103f */
/*04e0*/ BSSY B0, 0x5b0 ; /* 0x000000c000007945 */
/* 0x000ff00003800000 */
/*04f0*/ @P0 LDS R6, [R4.X4+-0x4] ; /* 0xfffffc0004060984 */
/* 0x000e240000004800 */
/*0500*/ @P0 IMAD.IADD R5, R5, 0x1, R6 ; /* 0x0000000105050824 */
/* 0x021fc400078e0206 */
/*0510*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0520*/ LOP3.LUT R6, R0, c[0x0][0x0], RZ, 0xc0, !PT ; /* 0x0000000000067a12 */
/* 0x000fc800078ec0ff */
/*0530*/ ISETP.NE.AND P6, PT, R6, c[0x0][0x0], PT ; /* 0x0000000006007a0c */
/* 0x000fe20003fc5270 */
/*0540*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x0001e8000c101906 */
/*0550*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0560*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0570*/ @P6 BRA 0x5a0 ; /* 0x0000002000006947 */
/* 0x000fea0003800000 */
/*0580*/ LDG.E R6, [R2.64] ; /* 0x0000000602067981 */
/* 0x001ea8000c1e1900 */
/*0590*/ STS [UR4], R6 ; /* 0x00000006ff007988 */
/* 0x0041e40008000804 */
/*05a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x001fea0003800000 */
/*05b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*05c0*/ PLOP3.LUT P6, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fc80003fce170 */
/*05d0*/ P2R R6, PR, RZ, 0x40 ; /* 0x00000040ff067803 */
/* 0x000fe40000000000 */
/*05e0*/ ISETP.NE.AND P6, PT, RZ, UR5, PT ; /* 0x00000005ff007c0c */
/* 0x000fda000bfc5270 */
/*05f0*/ @P6 BRA 0x9c0 ; /* 0x000003c000006947 */
/* 0x000fea0003800000 */
/*0600*/ @P5 LDS R6, [R0.X4+0x80] ; /* 0x0000800000065984 */
/* 0x000fe20000004800 */
/*0610*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*0620*/ ISETP.NE.AND P6, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003fc5270 */
/*0630*/ BSSY B0, 0x940 ; /* 0x0000030000007945 */
/* 0x000fe20003800000 */
/*0640*/ @P5 LDS R7, [R0.X4+0x7c] ; /* 0x00007c0000075984 */
/* 0x000e240000004800 */
/*0650*/ @P5 IMAD.IADD R7, R6, 0x1, R7 ; /* 0x0000000106075824 */
/* 0x001fe400078e0207 */
/*0660*/ @P4 LDS R6, [R0.X4+0x78] ; /* 0x0000780000064984 */
/* 0x000fe80000004800 */
/*0670*/ @P5 STS [R0.X4+0x80], R7 ; /* 0x0000800700005388 */
/* 0x000fe80000004800 */
/*0680*/ @P4 LDS R9, [R0.X4+0x80] ; /* 0x0000800000094984 */
/* 0x000e240000004800 */
/*0690*/ @P4 IADD3 R9, R6, R9, RZ ; /* 0x0000000906094210 */
/* 0x001fc40007ffe0ff */
/*06a0*/ @P3 LDS R6, [R0.X4+0x70] ; /* 0x0000700000063984 */
/* 0x000fe80000004800 */
/*06b0*/ @P4 STS [R0.X4+0x80], R9 ; /* 0x0000800900004388 */
/* 0x000fe80000004800 */
/*06c0*/ @P3 LDS R11, [R0.X4+0x80] ; /* 0x00008000000b3984 */
/* 0x000e240000004800 */
/*06d0*/ @P3 IMAD.IADD R11, R6, 0x1, R11 ; /* 0x00000001060b3824 */
/* 0x001fc400078e020b */
/*06e0*/ @P2 LDS R6, [R0.X4+0x60] ; /* 0x0000600000062984 */
/* 0x000fe80000004800 */
/*06f0*/ @P3 STS [R0.X4+0x80], R11 ; /* 0x0000800b00003388 */
/* 0x000fe80000004800 */
/*0700*/ @P2 LDS R13, [R0.X4+0x80] ; /* 0x00008000000d2984 */
/* 0x000e240000004800 */
/*0710*/ @P2 IMAD.IADD R13, R6, 0x1, R13 ; /* 0x00000001060d2824 */
/* 0x001fc400078e020d */
/*0720*/ @P1 LDS R6, [R0.X4+0x40] ; /* 0x0000400000061984 */
/* 0x000fe80000004800 */
/*0730*/ @P2 STS [R0.X4+0x80], R13 ; /* 0x0000800d00002388 */
/* 0x000fe80000004800 */
/*0740*/ @!P1 LDS R7, [R0.X4+0x80] ; /* 0x0000800000079984 */
/* 0x000fe80000004800 */
/*0750*/ @P1 LDS R15, [R0.X4+0x80] ; /* 0x00008000000f1984 */
/* 0x000e240000004800 */
/*0760*/ @P1 IMAD.IADD R7, R6, 0x1, R15 ; /* 0x0000000106071824 */
/* 0x001fca00078e020f */
/*0770*/ @P1 STS [R0.X4+0x80], R7 ; /* 0x0000800700001388 */
/* 0x000fe80000004800 */
/*0780*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0790*/ ISETP.NE.AND P1, PT, R10, 0x1f, PT ; /* 0x0000001f0a00780c */
/* 0x000fda0003f25270 */
/*07a0*/ @!P1 LDS R9, [R0.X4+0x80] ; /* 0x0000800000099984 */
/* 0x000e280000004800 */
/*07b0*/ @!P1 STS [R4.X4], R9 ; /* 0x0000000904009388 */
/* 0x0011e80000004800 */
/*07c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*07d0*/ @P6 BRA 0x930 ; /* 0x0000015000006947 */
/* 0x000fea0003800000 */
/*07e0*/ @P5 LDS R6, [R0.X4] ; /* 0x0000000000065984 */
/* 0x001fe20000004800 */
/*07f0*/ ISETP.GE.U32.AND P1, PT, R10, 0x10, PT ; /* 0x000000100a00780c */
/* 0x000fc60003f26070 */
/*0800*/ @P5 LDS R9, [R0.X4+-0x4] ; /* 0xfffffc0000095984 */
/* 0x000e240000004800 */
/*0810*/ @P5 IMAD.IADD R9, R6, 0x1, R9 ; /* 0x0000000106095824 */
/* 0x001fe400078e0209 */
/*0820*/ @P4 LDS R6, [R0.X4+-0x8] ; /* 0xfffff80000064984 */
/* 0x000fe80000004800 */
/*0830*/ @P5 STS [R0.X4], R9 ; /* 0x0000000900005388 */
/* 0x000fe80000004800 */
/*0840*/ @P4 LDS R11, [R0.X4] ; /* 0x00000000000b4984 */
/* 0x000e240000004800 */
/*0850*/ @P4 IMAD.IADD R11, R6, 0x1, R11 ; /* 0x00000001060b4824 */
/* 0x001fc400078e020b */
/*0860*/ @P3 LDS R6, [R0.X4+-0x10] ; /* 0xfffff00000063984 */
/* 0x000fe80000004800 */
/*0870*/ @P4 STS [R0.X4], R11 ; /* 0x0000000b00004388 */
/* 0x000fe80000004800 */
/*0880*/ @P3 LDS R13, [R0.X4] ; /* 0x00000000000d3984 */
/* 0x000e240000004800 */
/*0890*/ @P3 IADD3 R13, R6, R13, RZ ; /* 0x0000000d060d3210 */
/* 0x001fc40007ffe0ff */
/*08a0*/ @P2 LDS R6, [R0.X4+-0x20] ; /* 0xffffe00000062984 */
/* 0x000fe80000004800 */
/*08b0*/ @P3 STS [R0.X4], R13 ; /* 0x0000000d00003388 */
/* 0x000fe80000004800 */
/*08c0*/ @P2 LDS R15, [R0.X4] ; /* 0x00000000000f2984 */
/* 0x000e240000004800 */
/*08d0*/ @P2 IMAD.IADD R9, R6, 0x1, R15 ; /* 0x0000000106092824 */
/* 0x001fc400078e020f */
/*08e0*/ @P1 LDS R6, [R0.X4+-0x40] ; /* 0xffffc00000061984 */
/* 0x000fe80000004800 */
/*08f0*/ @P2 STS [R0.X4], R9 ; /* 0x0000000900002388 */
/* 0x000fe80000004800 */
/*0900*/ @P1 LDS R15, [R0.X4] ; /* 0x00000000000f1984 */
/* 0x000e240000004800 */
/*0910*/ @P1 IMAD.IADD R11, R6, 0x1, R15 ; /* 0x00000001060b1824 */
/* 0x001fca00078e020f */
/*0920*/ @P1 STS [R0.X4], R11 ; /* 0x0000000b00001388 */
/* 0x0001e40000004800 */
/*0930*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x001fea0003800000 */
/*0940*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0950*/ @P0 LDS R4, [R4.X4+-0x4] ; /* 0xfffffc0004040984 */
/* 0x000e240000004800 */
/*0960*/ @P0 IMAD.IADD R7, R7, 0x1, R4 ; /* 0x0000000107070824 */
/* 0x001fe400078e0204 */
/*0970*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0980*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fc80003f0f070 */
/*0990*/ P2R R6, PR, RZ, 0x1 ; /* 0x00000001ff067803 */
/* 0x000fe20000000000 */
/*09a0*/ STS [R0.X4+0x80], R7 ; /* 0x0000800700007388 */
/* 0x0001e80000004800 */
/*09b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*09c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*09d0*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fda0003f05270 */
/*09e0*/ @!P0 LDS R0, [UR4+-0x4] ; /* 0xfffffc04ff008984 */
/* 0x001e240008000800 */
/*09f0*/ @!P0 IMAD.IADD R5, R5, 0x1, R0 ; /* 0x0000000105058824 */
/* 0x001fe400078e0200 */
/*0a00*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0a10*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe8000c101906 */
/*0a20*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0a30*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0a40*/ BRA 0xa40; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0a50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0aa0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ab0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ac0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ad0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ae0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0af0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
static const int VECTOR_SIZE = 32;	// number of elements to scan
static const int WORK_SIZE = 16;	// threads per block (and shared ints per block)
/**
 * This macro checks the return value of a CUDA runtime call and, if the
 * call failed, prints the CUDA error string together with the line and
 * file of the failing call to stderr, then exits with status 1.
 */
#define CUDA_CHECK_RETURN(value) { \
	cudaError_t _m_cudaStat = value; \
	if (_m_cudaStat != cudaSuccess) { \
		fprintf(stderr, "Error %s at line %d in file %s\n", \
				cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
		exit(1); \
	} }
// In-place inclusive prefix sum (Hillis–Steele) over one 32-lane warp; each
// stride-1/2/4/8/16 step is guarded by the lane id so lane L only adds
// values from lanes <= L. Returns this lane's inclusive sum.
// NOTE(review): no __syncwarp()/volatile between cross-lane reads and
// writes — relies on implicit lockstep warp execution; confirm for targets.
__device__ int scan_warp(int *ptr, const unsigned int idx = threadIdx.x) {
	const unsigned int aux = idx & 31;	// lane index within the warp
	if(aux >= 1) ptr[idx] = ptr[idx - 1] + ptr[idx];
	if(aux >= 2) ptr[idx] = ptr[idx - 2] + ptr[idx];
	if(aux >= 4) ptr[idx] = ptr[idx - 4] + ptr[idx];
	if(aux >= 8) ptr[idx] = ptr[idx - 8] + ptr[idx];
	if(aux >= 16) ptr[idx] = ptr[idx - 16] + ptr[idx];
	return ptr[idx];	// inclusive scan value for this thread
}
// Block-wide inclusive scan: per-warp scan, last lane of each warp stores its
// warp total in shared temp[], warp 0 scans the totals, then warps > 0 add
// the preceding warps' sum. 32 temp slots => up to 1024 threads per block.
// NOTE(review): for blockDim.x < 32 no thread has aux == 31, so temp[] stays
// unwritten and warp 0 scans uninitialized data — the host launches
// WORK_SIZE=16 threads per block; confirm this is intended.
__device__ int scan_block(int *ptr, const unsigned int idx = threadIdx.x) {
	const unsigned int aux = idx & 31;	// lane id within the warp
	const unsigned int warp_id = idx >> 5;	// warp index within the block
	__shared__ int temp[32];	// per-warp inclusive totals
	int val = scan_warp(ptr, idx);	// step 1: intra-warp scan
	__syncthreads();
	if(aux == 31) temp[warp_id] = ptr[idx];	// last lane publishes warp total
	__syncthreads();
	if(warp_id == 0) scan_warp(temp, idx);	// step 2: scan warp totals
	__syncthreads();
	if(warp_id > 0) val = temp[warp_id - 1] + val;	// step 3: add prior warps
	__syncthreads();
	ptr[idx] = val;	// write back block-wide inclusive scan
	__syncthreads();
	return val;
}
// Attempted grid-wide inclusive scan (per-block scan + scan of block totals).
// NOTE(review): likely defects to confirm:
//  - (idx & blockDim.x) == blockDim.x is never true for idx < blockDim.x
//    with a power-of-two block size, so temp[bi] is never written; the
//    last-thread test was probably meant to be idx == blockDim.x - 1.
//  - temp[] is per-block *shared* memory, so block totals cannot be
//    exchanged between blocks; __syncthreads() is block-scoped only.
//  - ptr is indexed by threadIdx.x alone, so every block touches the same
//    first blockDim.x elements rather than its own slice.
__global__ void scan_global(int *ptr) {
	const unsigned int idx = threadIdx.x;	// thread index within the block
	const unsigned int aux = idx & blockDim.x;	// see NOTE — likely a bug
	const unsigned int bi = blockIdx.x;	// block index
	extern __shared__ int temp[];	// dynamic shared memory, per-block
	int val = scan_block(ptr, idx);	// block-local inclusive scan
	__syncthreads();
	if(aux == blockDim.x) temp[bi] = ptr[idx];	// never true for power-of-2 blockDim
	__syncthreads();
	if(bi == 0) scan_block(temp, idx);	// scans block 0's own temp only
	__syncthreads();
	if(bi > 0) val = temp[bi - 1] + val;	// reads this block's (unwritten) temp
	__syncthreads();
	ptr[idx] = val;
	__syncthreads();
	return;
}
/**
 * Host function that prepares the data array, runs scan_global, and checks
 * the device result against a CPU inclusive prefix sum.
 * With VECTOR_SIZE=32 and WORK_SIZE=16, GRID_SIZE = 33/16 = 2 blocks of 16
 * threads, each given 16 ints of dynamic shared memory (temp[] in the kernel).
 */
int main(void) {
	int *d = NULL;	// device buffer
	int i;
	unsigned int GRID_SIZE = (1 + VECTOR_SIZE) / WORK_SIZE;	// = 2 blocks
	unsigned int idata[VECTOR_SIZE], odata[VECTOR_SIZE];
	for (i = 0; i < VECTOR_SIZE; i++)
		idata[i] = (unsigned int) i;	// input sequence 0,1,2,...
	CUDA_CHECK_RETURN(cudaMalloc((void**) &d, sizeof(int) * VECTOR_SIZE));
	CUDA_CHECK_RETURN(
			cudaMemcpy(d, idata, sizeof(int) * VECTOR_SIZE, cudaMemcpyHostToDevice));
	// NOTE(review): the kernel indexes by threadIdx.x only, so only the first
	// WORK_SIZE elements are ever touched on the device — confirm.
	scan_global<<<GRID_SIZE, WORK_SIZE, WORK_SIZE * sizeof(int)>>>(d);
	CUDA_CHECK_RETURN(cudaDeviceSynchronize()); // Wait for the GPU launched work to complete
	CUDA_CHECK_RETURN(cudaGetLastError());
	CUDA_CHECK_RETURN(cudaMemcpy(odata, d, sizeof(int) * VECTOR_SIZE, cudaMemcpyDeviceToHost));
	// CPU reference: sum tracks the inclusive prefix sum of idata up to i.
	unsigned int sum = idata[0];
	for (i = 0; i < VECTOR_SIZE - 1; i++) {
		if(odata[i] != sum) {
			printf("Erro !!!");	// NOTE(review): no trailing newline
		}
		if((i + 1) < VECTOR_SIZE) {	// always true here since i < VECTOR_SIZE - 1
			sum += idata[i+1];
		}
		printf("Input value: %u, device output: %u\n", idata[i], odata[i]);
	}
	if(odata[VECTOR_SIZE - 1] != sum) {
		printf("Erro !!!");
	}
	// i == VECTOR_SIZE - 1 after the loop, so idata[i] is the last input.
	printf("Input value: %u, device output: %u\n", idata[i], odata[VECTOR_SIZE - 1]);
	CUDA_CHECK_RETURN(cudaFree((void*) d));
	CUDA_CHECK_RETURN(cudaDeviceReset());
	return 0;
} | .file "tmpxft_00142c59_00000000-6_Scan.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9scan_warpPij
.type _Z9scan_warpPij, @function
_Z9scan_warpPij:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z9scan_warpPij, .-_Z9scan_warpPij
.globl _Z10scan_blockPij
.type _Z10scan_blockPij, @function
_Z10scan_blockPij:
.LFB2058:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2058:
.size _Z10scan_blockPij, .-_Z10scan_blockPij
.globl _Z31__device_stub__Z11scan_globalPiPi
.type _Z31__device_stub__Z11scan_globalPiPi, @function
_Z31__device_stub__Z11scan_globalPiPi:
.LFB2084:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z11scan_globalPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z31__device_stub__Z11scan_globalPiPi, .-_Z31__device_stub__Z11scan_globalPiPi
.globl _Z11scan_globalPi
.type _Z11scan_globalPi, @function
_Z11scan_globalPi:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z11scan_globalPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z11scan_globalPi, .-_Z11scan_globalPi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "/home/ubuntu/Datasets/stackv2/train-structured/wakim/prog-gpu-cuda/master/Scan/src/Scan.cu"
.align 8
.LC1:
.string "Error %s at line %d in file %s\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "Erro !!!"
.section .rodata.str1.8
.align 8
.LC3:
.string "Input value: %u, device output: %u\n"
.text
.globl main
.type main, @function
main:
.LFB2059:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $312, %rsp
.cfi_def_cfa_offset 368
movq %fs:40, %rax
movq %rax, 296(%rsp)
xorl %eax, %eax
movq $0, (%rsp)
.L16:
movl %eax, 32(%rsp,%rax,4)
addq $1, %rax
cmpq $32, %rax
jne .L16
movq %rsp, %rdi
movl $128, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L32
leaq 32(%rsp), %rsi
movl $1, %ecx
movl $128, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L33
movl $16, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $2, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $64, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L34
.L19:
call cudaDeviceSynchronize@PLT
testl %eax, %eax
jne .L35
call cudaGetLastError@PLT
testl %eax, %eax
jne .L36
leaq 160(%rsp), %rdi
movl $2, %ecx
movl $128, %edx
movq (%rsp), %rsi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L37
movl 32(%rsp), %r12d
leaq 160(%rsp), %rbx
leaq 36(%rsp), %rbp
leaq 284(%rsp), %r15
jmp .L24
.L32:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC0(%rip), %r9
movl $111, %r8d
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L33:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC0(%rip), %r9
movl $112, %r8d
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L34:
movq (%rsp), %rdi
call _Z31__device_stub__Z11scan_globalPiPi
jmp .L19
.L35:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC0(%rip), %r9
movl $117, %r8d
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L36:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC0(%rip), %r9
movl $118, %r8d
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L37:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC0(%rip), %r9
movl $119, %r8d
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L23:
movl %r12d, %r13d
addl 0(%rbp), %r13d
movl %r13d, %r12d
movl (%r14), %ecx
movl -4(%rbp), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
addq $4, %rbp
cmpq %r15, %rbx
je .L38
.L24:
movq %rbx, %r14
cmpl %r12d, (%rbx)
je .L23
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L23
.L38:
cmpl %r13d, 284(%rsp)
jne .L39
.L25:
movl 284(%rsp), %ecx
movl 156(%rsp), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq (%rsp), %rdi
call cudaFree@PLT
testl %eax, %eax
jne .L40
call cudaDeviceReset@PLT
testl %eax, %eax
jne .L41
movq 296(%rsp), %rax
subq %fs:40, %rax
jne .L42
movl $0, %eax
addq $312, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L25
.L40:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC0(%rip), %r9
movl $137, %r8d
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L41:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC0(%rip), %r9
movl $138, %r8d
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size main, .-main
.section .rodata.str1.1
.LC4:
.string "_Z11scan_globalPi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z11scan_globalPi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
static const int VECTOR_SIZE = 32;	// number of elements to scan
static const int WORK_SIZE = 16;	// threads per block (and shared ints per block)
/**
 * This macro checks the return value of a CUDA runtime call and, if the
 * call failed, prints the CUDA error string together with the line and
 * file of the failing call to stderr, then exits with status 1.
 */
#define CUDA_CHECK_RETURN(value) { \
	cudaError_t _m_cudaStat = value; \
	if (_m_cudaStat != cudaSuccess) { \
		fprintf(stderr, "Error %s at line %d in file %s\n", \
				cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
		exit(1); \
	} }
// In-place inclusive prefix sum (Hillis–Steele) over one 32-lane warp; each
// stride-1/2/4/8/16 step is guarded by the lane id so lane L only adds
// values from lanes <= L. Returns this lane's inclusive sum.
// NOTE(review): no __syncwarp()/volatile between cross-lane reads and
// writes — relies on implicit lockstep warp execution; confirm for targets.
__device__ int scan_warp(int *ptr, const unsigned int idx = threadIdx.x) {
	const unsigned int aux = idx & 31;	// lane index within the warp
	if(aux >= 1) ptr[idx] = ptr[idx - 1] + ptr[idx];
	if(aux >= 2) ptr[idx] = ptr[idx - 2] + ptr[idx];
	if(aux >= 4) ptr[idx] = ptr[idx - 4] + ptr[idx];
	if(aux >= 8) ptr[idx] = ptr[idx - 8] + ptr[idx];
	if(aux >= 16) ptr[idx] = ptr[idx - 16] + ptr[idx];
	return ptr[idx];	// inclusive scan value for this thread
}
// Block-wide inclusive scan: per-warp scan, last lane of each warp stores its
// warp total in shared temp[], warp 0 scans the totals, then warps > 0 add
// the preceding warps' sum. 32 temp slots => up to 1024 threads per block.
// NOTE(review): for blockDim.x < 32 no thread has aux == 31, so temp[] stays
// unwritten and warp 0 scans uninitialized data — the host launches
// WORK_SIZE=16 threads per block; confirm this is intended.
__device__ int scan_block(int *ptr, const unsigned int idx = threadIdx.x) {
	const unsigned int aux = idx & 31;	// lane id within the warp
	const unsigned int warp_id = idx >> 5;	// warp index within the block
	__shared__ int temp[32];	// per-warp inclusive totals
	int val = scan_warp(ptr, idx);	// step 1: intra-warp scan
	__syncthreads();
	if(aux == 31) temp[warp_id] = ptr[idx];	// last lane publishes warp total
	__syncthreads();
	if(warp_id == 0) scan_warp(temp, idx);	// step 2: scan warp totals
	__syncthreads();
	if(warp_id > 0) val = temp[warp_id - 1] + val;	// step 3: add prior warps
	__syncthreads();
	ptr[idx] = val;	// write back block-wide inclusive scan
	__syncthreads();
	return val;
}
// Attempted grid-wide inclusive scan (per-block scan + scan of block totals).
// NOTE(review): likely defects to confirm:
//  - (idx & blockDim.x) == blockDim.x is never true for idx < blockDim.x
//    with a power-of-two block size, so temp[bi] is never written; the
//    last-thread test was probably meant to be idx == blockDim.x - 1.
//  - temp[] is per-block *shared* memory, so block totals cannot be
//    exchanged between blocks; __syncthreads() is block-scoped only.
//  - ptr is indexed by threadIdx.x alone, so every block touches the same
//    first blockDim.x elements rather than its own slice.
__global__ void scan_global(int *ptr) {
	const unsigned int idx = threadIdx.x;	// thread index within the block
	const unsigned int aux = idx & blockDim.x;	// see NOTE — likely a bug
	const unsigned int bi = blockIdx.x;	// block index
	extern __shared__ int temp[];	// dynamic shared memory, per-block
	int val = scan_block(ptr, idx);	// block-local inclusive scan
	__syncthreads();
	if(aux == blockDim.x) temp[bi] = ptr[idx];	// never true for power-of-2 blockDim
	__syncthreads();
	if(bi == 0) scan_block(temp, idx);	// scans block 0's own temp only
	__syncthreads();
	if(bi > 0) val = temp[bi - 1] + val;	// reads this block's (unwritten) temp
	__syncthreads();
	ptr[idx] = val;
	__syncthreads();
	return;
}
/**
 * Host function that prepares the data array, runs scan_global, and checks
 * the device result against a CPU inclusive prefix sum.
 * With VECTOR_SIZE=32 and WORK_SIZE=16, GRID_SIZE = 33/16 = 2 blocks of 16
 * threads, each given 16 ints of dynamic shared memory (temp[] in the kernel).
 */
int main(void) {
	int *d = NULL;	// device buffer
	int i;
	unsigned int GRID_SIZE = (1 + VECTOR_SIZE) / WORK_SIZE;	// = 2 blocks
	unsigned int idata[VECTOR_SIZE], odata[VECTOR_SIZE];
	for (i = 0; i < VECTOR_SIZE; i++)
		idata[i] = (unsigned int) i;	// input sequence 0,1,2,...
	CUDA_CHECK_RETURN(cudaMalloc((void**) &d, sizeof(int) * VECTOR_SIZE));
	CUDA_CHECK_RETURN(
			cudaMemcpy(d, idata, sizeof(int) * VECTOR_SIZE, cudaMemcpyHostToDevice));
	// NOTE(review): the kernel indexes by threadIdx.x only, so only the first
	// WORK_SIZE elements are ever touched on the device — confirm.
	scan_global<<<GRID_SIZE, WORK_SIZE, WORK_SIZE * sizeof(int)>>>(d);
	CUDA_CHECK_RETURN(cudaDeviceSynchronize()); // Wait for the GPU launched work to complete
	CUDA_CHECK_RETURN(cudaGetLastError());
	CUDA_CHECK_RETURN(cudaMemcpy(odata, d, sizeof(int) * VECTOR_SIZE, cudaMemcpyDeviceToHost));
	// CPU reference: sum tracks the inclusive prefix sum of idata up to i.
	unsigned int sum = idata[0];
	for (i = 0; i < VECTOR_SIZE - 1; i++) {
		if(odata[i] != sum) {
			printf("Erro !!!");	// NOTE(review): no trailing newline
		}
		if((i + 1) < VECTOR_SIZE) {	// always true here since i < VECTOR_SIZE - 1
			sum += idata[i+1];
		}
		printf("Input value: %u, device output: %u\n", idata[i], odata[i]);
	}
	if(odata[VECTOR_SIZE - 1] != sum) {
		printf("Erro !!!");
	}
	// i == VECTOR_SIZE - 1 after the loop, so idata[i] is the last input.
	printf("Input value: %u, device output: %u\n", idata[i], odata[VECTOR_SIZE - 1]);
	CUDA_CHECK_RETURN(cudaFree((void*) d));
	CUDA_CHECK_RETURN(cudaDeviceReset());
	return 0;
} | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
static const int VECTOR_SIZE = 32;	// number of elements to scan
static const int WORK_SIZE = 16;	// threads per block (and shared ints per block)
/**
 * This macro checks the return value of a HIP runtime call and, if the
 * call failed, prints the HIP error string together with the line and
 * file of the failing call to stderr, then exits with status 1.
 * (The CUDA_CHECK_RETURN name is kept from the CUDA original this HIP
 * port was converted from.)
 */
#define CUDA_CHECK_RETURN(value) { \
	hipError_t _m_cudaStat = value; \
	if (_m_cudaStat != hipSuccess) { \
		fprintf(stderr, "Error %s at line %d in file %s\n", \
				hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
		exit(1); \
	} }
// In-place inclusive prefix sum (Hillis–Steele) over one 32-lane warp; each
// stride-1/2/4/8/16 step is guarded by the lane id so lane L only adds
// values from lanes <= L. Returns this lane's inclusive sum.
// NOTE(review): hard-codes 32-lane warps and relies on implicit lockstep
// execution (no __syncwarp/volatile). On HIP/AMD the wavefront is commonly
// 64 lanes (wave32 only on some RDNA targets) — confirm warpSize and
// cross-lane visibility for the build target.
__device__ int scan_warp(int *ptr, const unsigned int idx = threadIdx.x) {
	const unsigned int aux = idx & 31;	// lane index, assuming 32-lane warps
	if(aux >= 1) ptr[idx] = ptr[idx - 1] + ptr[idx];
	if(aux >= 2) ptr[idx] = ptr[idx - 2] + ptr[idx];
	if(aux >= 4) ptr[idx] = ptr[idx - 4] + ptr[idx];
	if(aux >= 8) ptr[idx] = ptr[idx - 8] + ptr[idx];
	if(aux >= 16) ptr[idx] = ptr[idx - 16] + ptr[idx];
	return ptr[idx];	// inclusive scan value for this thread
}
__device__ int scan_block(int *ptr, const unsigned int idx = threadIdx.x) {
const unsigned int aux = idx & 31;
const unsigned int warp_id = idx >> 5;
__shared__ int temp[32];
int val = scan_warp(ptr, idx);
__syncthreads();
if(aux == 31) temp[warp_id] = ptr[idx];
__syncthreads();
if(warp_id == 0) scan_warp(temp, idx);
__syncthreads();
if(warp_id > 0) val = temp[warp_id - 1] + val;
__syncthreads();
ptr[idx] = val;
__syncthreads();
return val;
}
__global__ void scan_global(int *ptr) {
const unsigned int idx = threadIdx.x;
const unsigned int aux = idx & blockDim.x;
const unsigned int bi = blockIdx.x;
extern __shared__ int temp[];
int val = scan_block(ptr, idx);
__syncthreads();
if(aux == blockDim.x) temp[bi] = ptr[idx];
__syncthreads();
if(bi == 0) scan_block(temp, idx);
__syncthreads();
if(bi > 0) val = temp[bi - 1] + val;
__syncthreads();
ptr[idx] = val;
__syncthreads();
return;
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void) {
int *d = NULL;
int i;
unsigned int GRID_SIZE = (1 + VECTOR_SIZE) / WORK_SIZE;
unsigned int idata[VECTOR_SIZE], odata[VECTOR_SIZE];
for (i = 0; i < VECTOR_SIZE; i++)
idata[i] = (unsigned int) i;
CUDA_CHECK_RETURN(hipMalloc((void**) &d, sizeof(int) * VECTOR_SIZE));
CUDA_CHECK_RETURN(
hipMemcpy(d, idata, sizeof(int) * VECTOR_SIZE, hipMemcpyHostToDevice));
scan_global<<<GRID_SIZE, WORK_SIZE, WORK_SIZE * sizeof(int)>>>(d);
CUDA_CHECK_RETURN(hipDeviceSynchronize()); // Wait for the GPU launched work to complete
CUDA_CHECK_RETURN(hipGetLastError());
CUDA_CHECK_RETURN(hipMemcpy(odata, d, sizeof(int) * VECTOR_SIZE, hipMemcpyDeviceToHost));
unsigned int sum = idata[0];
for (i = 0; i < VECTOR_SIZE - 1; i++) {
if(odata[i] != sum) {
printf("Erro !!!");
}
if((i + 1) < VECTOR_SIZE) {
sum += idata[i+1];
}
printf("Input value: %u, device output: %u\n", idata[i], odata[i]);
}
if(odata[VECTOR_SIZE - 1] != sum) {
printf("Erro !!!");
}
printf("Input value: %u, device output: %u\n", idata[i], odata[VECTOR_SIZE - 1]);
CUDA_CHECK_RETURN(hipFree((void*) d));
CUDA_CHECK_RETURN(hipDeviceReset());
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
static const int VECTOR_SIZE = 32;
static const int WORK_SIZE = 16;
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
__device__ int scan_warp(int *ptr, const unsigned int idx = threadIdx.x) {
const unsigned int aux = idx & 31;
if(aux >= 1) ptr[idx] = ptr[idx - 1] + ptr[idx];
if(aux >= 2) ptr[idx] = ptr[idx - 2] + ptr[idx];
if(aux >= 4) ptr[idx] = ptr[idx - 4] + ptr[idx];
if(aux >= 8) ptr[idx] = ptr[idx - 8] + ptr[idx];
if(aux >= 16) ptr[idx] = ptr[idx - 16] + ptr[idx];
return ptr[idx];
}
__device__ int scan_block(int *ptr, const unsigned int idx = threadIdx.x) {
const unsigned int aux = idx & 31;
const unsigned int warp_id = idx >> 5;
__shared__ int temp[32];
int val = scan_warp(ptr, idx);
__syncthreads();
if(aux == 31) temp[warp_id] = ptr[idx];
__syncthreads();
if(warp_id == 0) scan_warp(temp, idx);
__syncthreads();
if(warp_id > 0) val = temp[warp_id - 1] + val;
__syncthreads();
ptr[idx] = val;
__syncthreads();
return val;
}
__global__ void scan_global(int *ptr) {
const unsigned int idx = threadIdx.x;
const unsigned int aux = idx & blockDim.x;
const unsigned int bi = blockIdx.x;
extern __shared__ int temp[];
int val = scan_block(ptr, idx);
__syncthreads();
if(aux == blockDim.x) temp[bi] = ptr[idx];
__syncthreads();
if(bi == 0) scan_block(temp, idx);
__syncthreads();
if(bi > 0) val = temp[bi - 1] + val;
__syncthreads();
ptr[idx] = val;
__syncthreads();
return;
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void) {
int *d = NULL;
int i;
unsigned int GRID_SIZE = (1 + VECTOR_SIZE) / WORK_SIZE;
unsigned int idata[VECTOR_SIZE], odata[VECTOR_SIZE];
for (i = 0; i < VECTOR_SIZE; i++)
idata[i] = (unsigned int) i;
CUDA_CHECK_RETURN(hipMalloc((void**) &d, sizeof(int) * VECTOR_SIZE));
CUDA_CHECK_RETURN(
hipMemcpy(d, idata, sizeof(int) * VECTOR_SIZE, hipMemcpyHostToDevice));
scan_global<<<GRID_SIZE, WORK_SIZE, WORK_SIZE * sizeof(int)>>>(d);
CUDA_CHECK_RETURN(hipDeviceSynchronize()); // Wait for the GPU launched work to complete
CUDA_CHECK_RETURN(hipGetLastError());
CUDA_CHECK_RETURN(hipMemcpy(odata, d, sizeof(int) * VECTOR_SIZE, hipMemcpyDeviceToHost));
unsigned int sum = idata[0];
for (i = 0; i < VECTOR_SIZE - 1; i++) {
if(odata[i] != sum) {
printf("Erro !!!");
}
if((i + 1) < VECTOR_SIZE) {
sum += idata[i+1];
}
printf("Input value: %u, device output: %u\n", idata[i], odata[i]);
}
if(odata[VECTOR_SIZE - 1] != sum) {
printf("Erro !!!");
}
printf("Input value: %u, device output: %u\n", idata[i], odata[VECTOR_SIZE - 1]);
CUDA_CHECK_RETURN(hipFree((void*) d));
CUDA_CHECK_RETURN(hipDeviceReset());
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11scan_globalPi
.globl _Z11scan_globalPi
.p2align 8
.type _Z11scan_globalPi,@function
_Z11scan_globalPi:
s_clause 0x1
s_load_b64 s[6:7], s[0:1], 0x0
s_load_b32 s8, s[0:1], 0x14
v_and_b32_e32 v5, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_ne_u32_e64 s0, 0, v5
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_2
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v1, -1, v0
v_lshlrev_b32_e32 v3, 2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
s_clause 0x1
global_load_b32 v1, v[1:2], off
global_load_b32 v2, v3, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v3, v1, s[6:7]
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s1
v_cmp_lt_u32_e64 s1, 1, v5
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s2, s1
s_cbranch_execz .LBB0_4
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v1, -2, v0
v_lshlrev_b32_e32 v3, 2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
s_clause 0x1
global_load_b32 v1, v[1:2], off
global_load_b32 v2, v3, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v3, v1, s[6:7]
.LBB0_4:
s_or_b32 exec_lo, exec_lo, s2
v_cmp_lt_u32_e64 s2, 3, v5
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_6
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v1, -4, v0
v_lshlrev_b32_e32 v3, 2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
s_clause 0x1
global_load_b32 v1, v[1:2], off
global_load_b32 v2, v3, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v3, v1, s[6:7]
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s3
v_cmp_lt_u32_e64 s3, 7, v5
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s4, s3
s_cbranch_execz .LBB0_8
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v1, -8, v0
v_lshlrev_b32_e32 v3, 2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
s_clause 0x1
global_load_b32 v1, v[1:2], off
global_load_b32 v2, v3, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v3, v1, s[6:7]
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s4
v_cmp_lt_u32_e64 s4, 15, v5
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s5, s4
s_cbranch_execz .LBB0_10
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v1, -16, v0
v_lshlrev_b32_e32 v3, 2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
s_clause 0x1
global_load_b32 v1, v[1:2], off
global_load_b32 v2, v3, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v3, v1, s[6:7]
.LBB0_10:
s_or_b32 exec_lo, exec_lo, s5
v_lshlrev_b32_e32 v1, 2, v0
v_lshrrev_b32_e32 v4, 5, v0
s_waitcnt lgkmcnt(0)
global_load_b32 v3, v1, s[6:7]
v_add_co_u32 v1, s5, s6, v1
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v2, null, s7, 0, s5
v_cmp_eq_u32_e64 s6, 31, v5
s_waitcnt vmcnt(0)
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s5, s6
s_cbranch_execz .LBB0_12
global_load_b32 v5, v[1:2], off
v_lshlrev_b32_e32 v6, 2, v4
s_waitcnt vmcnt(0)
ds_store_b32 v6, v5
.LBB0_12:
s_or_b32 exec_lo, exec_lo, s5
v_cmp_lt_u32_e64 s5, 31, v0
v_cmp_gt_u32_e64 s7, 32, v0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s9, s7
s_cbranch_execz .LBB0_23
s_and_saveexec_b32 s10, s0
s_cbranch_execz .LBB0_15
v_lshlrev_b32_e32 v5, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v6, -4, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_15:
s_or_b32 exec_lo, exec_lo, s10
s_and_saveexec_b32 s10, s1
s_cbranch_execz .LBB0_17
v_lshlrev_b32_e32 v5, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v6, -8, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_17:
s_or_b32 exec_lo, exec_lo, s10
s_and_saveexec_b32 s10, s2
s_cbranch_execz .LBB0_19
v_lshlrev_b32_e32 v5, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v6, -16, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_19:
s_or_b32 exec_lo, exec_lo, s10
s_and_saveexec_b32 s10, s3
s_cbranch_execz .LBB0_21
v_lshlrev_b32_e32 v5, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_subrev_nc_u32_e32 v6, 32, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_21:
s_or_b32 exec_lo, exec_lo, s10
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s4
s_cbranch_execz .LBB0_23
v_lshlrev_b32_e32 v5, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_subrev_nc_u32_e32 v6, 64, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_23:
s_or_b32 exec_lo, exec_lo, s9
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s9, s5
s_cbranch_execz .LBB0_25
v_lshl_add_u32 v5, v4, 2, -4
ds_load_b32 v5, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v3, v5, v3
.LBB0_25:
s_or_b32 exec_lo, exec_lo, s9
s_and_b32 s8, 0xffff, s8
s_delay_alu instid0(SALU_CYCLE_1)
v_and_b32_e32 v5, s8, v0
s_barrier
buffer_gl0_inv
global_store_b32 v[1:2], v3, off
s_waitcnt_vscnt null, 0x0
v_cmp_eq_u32_e32 vcc_lo, s8, v5
s_barrier
buffer_gl0_inv
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s8, vcc_lo
s_cbranch_execz .LBB0_27
global_load_b32 v5, v[1:2], off
s_lshl_b32 s9, s15, 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_addk_i32 s9, 0x80
v_mov_b32_e32 v6, s9
s_waitcnt vmcnt(0)
ds_store_b32 v6, v5
.LBB0_27:
s_or_b32 exec_lo, exec_lo, s8
s_cmp_lg_u32 s15, 0
s_waitcnt lgkmcnt(0)
s_cselect_b32 s8, -1, 0
s_barrier
s_and_b32 vcc_lo, exec_lo, s8
buffer_gl0_inv
s_cbranch_vccnz .LBB0_54
s_and_saveexec_b32 s9, s0
s_cbranch_execz .LBB0_30
v_lshl_add_u32 v5, v0, 2, 0x80
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v6, -4, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_30:
s_or_b32 exec_lo, exec_lo, s9
s_and_saveexec_b32 s9, s1
s_cbranch_execz .LBB0_32
v_lshl_add_u32 v5, v0, 2, 0x80
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v6, -8, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_32:
s_or_b32 exec_lo, exec_lo, s9
s_and_saveexec_b32 s9, s2
s_cbranch_execz .LBB0_34
v_lshl_add_u32 v5, v0, 2, 0x80
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v6, -16, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_34:
s_or_b32 exec_lo, exec_lo, s9
s_and_saveexec_b32 s9, s3
s_cbranch_execz .LBB0_36
v_lshl_add_u32 v5, v0, 2, 0x80
s_delay_alu instid0(VALU_DEP_1)
v_subrev_nc_u32_e32 v6, 32, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_36:
s_or_b32 exec_lo, exec_lo, s9
s_and_saveexec_b32 s9, s4
s_cbranch_execz .LBB0_38
v_lshl_add_u32 v5, v0, 2, 0x80
s_delay_alu instid0(VALU_DEP_1)
v_subrev_nc_u32_e32 v6, 64, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_38:
s_or_b32 exec_lo, exec_lo, s9
v_lshl_add_u32 v5, v0, 2, 0x80
ds_load_b32 v6, v5
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s9, s6
s_cbranch_execz .LBB0_40
ds_load_b32 v7, v5
v_lshlrev_b32_e32 v8, 2, v4
s_waitcnt lgkmcnt(0)
ds_store_b32 v8, v7
.LBB0_40:
s_or_b32 exec_lo, exec_lo, s9
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s6, s7
s_cbranch_execz .LBB0_51
s_and_saveexec_b32 s7, s0
s_cbranch_execz .LBB0_43
v_lshlrev_b32_e32 v7, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v8, -4, v7
ds_load_b32 v8, v8
ds_load_b32 v9, v7
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v8, v9, v8
ds_store_b32 v7, v8
.LBB0_43:
s_or_b32 exec_lo, exec_lo, s7
s_and_saveexec_b32 s0, s1
s_cbranch_execz .LBB0_45
v_lshlrev_b32_e32 v7, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v8, -8, v7
ds_load_b32 v8, v8
ds_load_b32 v9, v7
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v8, v9, v8
ds_store_b32 v7, v8
.LBB0_45:
s_or_b32 exec_lo, exec_lo, s0
s_and_saveexec_b32 s0, s2
s_cbranch_execz .LBB0_47
v_lshlrev_b32_e32 v7, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v8, -16, v7
ds_load_b32 v8, v8
ds_load_b32 v9, v7
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v8, v9, v8
ds_store_b32 v7, v8
.LBB0_47:
s_or_b32 exec_lo, exec_lo, s0
s_and_saveexec_b32 s0, s3
s_cbranch_execz .LBB0_49
v_lshlrev_b32_e32 v7, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_subrev_nc_u32_e32 v8, 32, v7
ds_load_b32 v8, v8
ds_load_b32 v9, v7
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v8, v9, v8
ds_store_b32 v7, v8
.LBB0_49:
s_or_b32 exec_lo, exec_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s4
s_cbranch_execz .LBB0_51
v_lshlrev_b32_e32 v0, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_subrev_nc_u32_e32 v7, 64, v0
ds_load_b32 v7, v7
ds_load_b32 v8, v0
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v7, v8, v7
ds_store_b32 v0, v7
.LBB0_51:
s_or_b32 exec_lo, exec_lo, s6
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s0, s5
s_cbranch_execz .LBB0_53
v_lshl_add_u32 v0, v4, 2, -4
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v0, v6
.LBB0_53:
s_or_b32 exec_lo, exec_lo, s0
s_barrier
buffer_gl0_inv
ds_store_b32 v5, v6
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
.LBB0_54:
s_and_not1_b32 vcc_lo, exec_lo, s8
s_barrier
buffer_gl0_inv
s_cbranch_vccnz .LBB0_56
s_lshl_b32 s0, s15, 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_addk_i32 s0, 0x80
s_add_i32 s0, s0, -4
s_delay_alu instid0(SALU_CYCLE_1)
v_mov_b32_e32 v0, s0
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v3, v0, v3
.LBB0_56:
s_barrier
buffer_gl0_inv
global_store_b32 v[1:2], v3, off
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11scan_globalPi
.amdhsa_group_segment_fixed_size 128
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11scan_globalPi, .Lfunc_end0-_Z11scan_globalPi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
- .offset: 128
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 128
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11scan_globalPi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11scan_globalPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
static const int VECTOR_SIZE = 32;
static const int WORK_SIZE = 16;
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
__device__ int scan_warp(int *ptr, const unsigned int idx = threadIdx.x) {
const unsigned int aux = idx & 31;
if(aux >= 1) ptr[idx] = ptr[idx - 1] + ptr[idx];
if(aux >= 2) ptr[idx] = ptr[idx - 2] + ptr[idx];
if(aux >= 4) ptr[idx] = ptr[idx - 4] + ptr[idx];
if(aux >= 8) ptr[idx] = ptr[idx - 8] + ptr[idx];
if(aux >= 16) ptr[idx] = ptr[idx - 16] + ptr[idx];
return ptr[idx];
}
__device__ int scan_block(int *ptr, const unsigned int idx = threadIdx.x) {
const unsigned int aux = idx & 31;
const unsigned int warp_id = idx >> 5;
__shared__ int temp[32];
int val = scan_warp(ptr, idx);
__syncthreads();
if(aux == 31) temp[warp_id] = ptr[idx];
__syncthreads();
if(warp_id == 0) scan_warp(temp, idx);
__syncthreads();
if(warp_id > 0) val = temp[warp_id - 1] + val;
__syncthreads();
ptr[idx] = val;
__syncthreads();
return val;
}
__global__ void scan_global(int *ptr) {
const unsigned int idx = threadIdx.x;
const unsigned int aux = idx & blockDim.x;
const unsigned int bi = blockIdx.x;
extern __shared__ int temp[];
int val = scan_block(ptr, idx);
__syncthreads();
if(aux == blockDim.x) temp[bi] = ptr[idx];
__syncthreads();
if(bi == 0) scan_block(temp, idx);
__syncthreads();
if(bi > 0) val = temp[bi - 1] + val;
__syncthreads();
ptr[idx] = val;
__syncthreads();
return;
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void) {
int *d = NULL;
int i;
unsigned int GRID_SIZE = (1 + VECTOR_SIZE) / WORK_SIZE;
unsigned int idata[VECTOR_SIZE], odata[VECTOR_SIZE];
for (i = 0; i < VECTOR_SIZE; i++)
idata[i] = (unsigned int) i;
CUDA_CHECK_RETURN(hipMalloc((void**) &d, sizeof(int) * VECTOR_SIZE));
CUDA_CHECK_RETURN(
hipMemcpy(d, idata, sizeof(int) * VECTOR_SIZE, hipMemcpyHostToDevice));
scan_global<<<GRID_SIZE, WORK_SIZE, WORK_SIZE * sizeof(int)>>>(d);
CUDA_CHECK_RETURN(hipDeviceSynchronize()); // Wait for the GPU launched work to complete
CUDA_CHECK_RETURN(hipGetLastError());
CUDA_CHECK_RETURN(hipMemcpy(odata, d, sizeof(int) * VECTOR_SIZE, hipMemcpyDeviceToHost));
unsigned int sum = idata[0];
for (i = 0; i < VECTOR_SIZE - 1; i++) {
if(odata[i] != sum) {
printf("Erro !!!");
}
if((i + 1) < VECTOR_SIZE) {
sum += idata[i+1];
}
printf("Input value: %u, device output: %u\n", idata[i], odata[i]);
}
if(odata[VECTOR_SIZE - 1] != sum) {
printf("Erro !!!");
}
printf("Input value: %u, device output: %u\n", idata[i], odata[VECTOR_SIZE - 1]);
CUDA_CHECK_RETURN(hipFree((void*) d));
CUDA_CHECK_RETURN(hipDeviceReset());
return 0;
} | .text
.file "Scan.hip"
.globl _Z26__device_stub__scan_globalPi # -- Begin function _Z26__device_stub__scan_globalPi
.p2align 4, 0x90
.type _Z26__device_stub__scan_globalPi,@function
_Z26__device_stub__scan_globalPi: # @_Z26__device_stub__scan_globalPi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z11scan_globalPi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z26__device_stub__scan_globalPi, .Lfunc_end0-_Z26__device_stub__scan_globalPi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $320, %rsp # imm = 0x140
.cfi_def_cfa_offset 352
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq $0, 8(%rsp)
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %eax, 192(%rsp,%rax,4)
incq %rax
cmpq $32, %rax
jne .LBB1_1
# %bb.2:
leaq 8(%rsp), %rdi
movl $128, %esi
callq hipMalloc
testl %eax, %eax
jne .LBB1_3
# %bb.5:
movq 8(%rsp), %rdi
leaq 192(%rsp), %rsi
movl $128, %edx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_6
# %bb.7:
movabsq $4294967298, %rdi # imm = 0x100000002
leaq 14(%rdi), %rdx
movl $64, %r8d
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_9
# %bb.8:
movq 8(%rsp), %rax
movq %rax, 56(%rsp)
leaq 56(%rsp), %rax
movq %rax, 16(%rsp)
leaq 64(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z11scan_globalPi, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_9:
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB1_10
# %bb.11:
callq hipGetLastError
testl %eax, %eax
jne .LBB1_12
# %bb.13:
movq 8(%rsp), %rsi
leaq 64(%rsp), %rdi
movl $128, %edx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_14
# %bb.15:
movl 192(%rsp), %ebx
xorl %r14d, %r14d
jmp .LBB1_16
.p2align 4, 0x90
.LBB1_18: # in Loop: Header=BB1_16 Depth=1
leaq 1(%r14), %r15
addl 196(%rsp,%r14,4), %ebx
movl 192(%rsp,%r14,4), %esi
movl 64(%rsp,%r14,4), %edx
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movq %r15, %r14
cmpq $31, %r15
je .LBB1_19
.LBB1_16: # =>This Inner Loop Header: Depth=1
cmpl %ebx, 64(%rsp,%r14,4)
je .LBB1_18
# %bb.17: # in Loop: Header=BB1_16 Depth=1
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
jmp .LBB1_18
.LBB1_19:
cmpl %ebx, 188(%rsp)
je .LBB1_21
# %bb.20:
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
.LBB1_21:
movl 316(%rsp), %esi
movl 188(%rsp), %edx
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movq 8(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB1_22
# %bb.23:
callq hipDeviceReset
testl %eax, %eax
jne .LBB1_24
# %bb.25:
xorl %eax, %eax
addq $320, %rsp # imm = 0x140
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB1_3:
.cfi_def_cfa_offset 352
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $113, %ecx
jmp .LBB1_4
.LBB1_6:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $115, %ecx
jmp .LBB1_4
.LBB1_10:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $119, %ecx
jmp .LBB1_4
.LBB1_12:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $120, %ecx
jmp .LBB1_4
.LBB1_14:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $121, %ecx
jmp .LBB1_4
.LBB1_22:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $139, %ecx
jmp .LBB1_4
.LBB1_24:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $140, %ecx
.LBB1_4:
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11scan_globalPi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11scan_globalPi,@object # @_Z11scan_globalPi
.section .rodata,"a",@progbits
.globl _Z11scan_globalPi
.p2align 3, 0x0
_Z11scan_globalPi:
.quad _Z26__device_stub__scan_globalPi
.size _Z11scan_globalPi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error %s at line %d in file %s\n"
.size .L.str, 32
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/wakim/prog-gpu-cuda/master/Scan/src/Scan.hip"
.size .L.str.1, 102
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Erro !!!"
.size .L.str.2, 9
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Input value: %u, device output: %u\n"
.size .L.str.3, 36
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11scan_globalPi"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__scan_globalPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11scan_globalPi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z11scan_globalPi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0020*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*0040*/ LOP3.LUT P5, R10, R0.reuse, 0x1f, RZ, 0xc0, !PT ; /* 0x0000001f000a7812 */
/* 0x041fe400078ac0ff */
/*0050*/ IMAD.WIDE.U32 R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fd600078e0005 */
/*0060*/ @P5 IADD3 R6, R0, -0x1, RZ ; /* 0xffffffff00065810 */
/* 0x000fe20007ffe0ff */
/*0070*/ @P5 LDG.E R4, [R2.64] ; /* 0x0000000602045981 */
/* 0x000ea8000c1e1900 */
/*0080*/ @P5 IMAD.WIDE.U32 R6, R6, R5, c[0x0][0x160] ; /* 0x0000580006065625 */
/* 0x000fcc00078e0005 */
/*0090*/ @P5 LDG.E R7, [R6.64] ; /* 0x0000000606075981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ ISETP.GE.U32.AND P4, PT, R10, 0x2, PT ; /* 0x000000020a00780c */
/* 0x000fda0003f86070 */
/*00b0*/ @P4 IADD3 R8, R0, -0x2, RZ ; /* 0xfffffffe00084810 */
/* 0x000fca0007ffe0ff */
/*00c0*/ @P4 IMAD.WIDE.U32 R8, R8, R5, c[0x0][0x160] ; /* 0x0000580008084625 */
/* 0x000fc800078e0005 */
/*00d0*/ @P5 IMAD.IADD R11, R4, 0x1, R7 ; /* 0x00000001040b5824 */
/* 0x004fca00078e0207 */
/*00e0*/ @P5 STG.E [R2.64], R11 ; /* 0x0000000b02005986 */
/* 0x0001e8000c101906 */
/*00f0*/ @P4 LDG.E R8, [R8.64] ; /* 0x0000000608084981 */
/* 0x000ea8000c1e1900 */
/*0100*/ @P4 LDG.E R13, [R2.64] ; /* 0x00000006020d4981 */
/* 0x000ea2000c1e1900 */
/*0110*/ ISETP.GE.U32.AND P3, PT, R10, 0x4, PT ; /* 0x000000040a00780c */
/* 0x000fda0003f66070 */
/*0120*/ @P3 IADD3 R4, R0, -0x4, RZ ; /* 0xfffffffc00043810 */
/* 0x000fca0007ffe0ff */
/*0130*/ @P3 IMAD.WIDE.U32 R6, R4, R5, c[0x0][0x160] ; /* 0x0000580004063625 */
/* 0x000fc800078e0005 */
/*0140*/ @P4 IMAD.IADD R13, R8, 0x1, R13 ; /* 0x00000001080d4824 */
/* 0x004fca00078e020d */
/*0150*/ @P4 STG.E [R2.64], R13 ; /* 0x0000000d02004986 */
/* 0x0003e8000c101906 */
/*0160*/ @P3 LDG.E R6, [R6.64] ; /* 0x0000000606063981 */
/* 0x000e28000c1e1900 */
/*0170*/ @P3 LDG.E R15, [R2.64] ; /* 0x00000006020f3981 */
/* 0x000e22000c1e1900 */
/*0180*/ ISETP.GE.U32.AND P2, PT, R10, 0x8, PT ; /* 0x000000080a00780c */
/* 0x000fda0003f46070 */
/*0190*/ @P2 IADD3 R4, R0, -0x8, RZ ; /* 0xfffffff800042810 */
/* 0x000fca0007ffe0ff */
/*01a0*/ @P2 IMAD.WIDE.U32 R8, R4, R5, c[0x0][0x160] ; /* 0x0000580004082625 */
/* 0x000fc800078e0005 */
/*01b0*/ @P3 IMAD.IADD R11, R6, 0x1, R15 ; /* 0x00000001060b3824 */
/* 0x001fca00078e020f */
/*01c0*/ @P3 STG.E [R2.64], R11 ; /* 0x0000000b02003986 */
/* 0x0001e8000c101906 */
/*01d0*/ @P2 LDG.E R8, [R8.64] ; /* 0x0000000608082981 */
/* 0x000e68000c1e1900 */
/*01e0*/ @P2 LDG.E R15, [R2.64] ; /* 0x00000006020f2981 */
/* 0x000e62000c1e1900 */
/*01f0*/ ISETP.GT.U32.AND P1, PT, R10, 0xf, PT ; /* 0x0000000f0a00780c */
/* 0x000fda0003f24070 */
/*0200*/ @P1 IADD3 R4, R0, -0x10, RZ ; /* 0xfffffff000041810 */
/* 0x000fca0007ffe0ff */
/*0210*/ @P1 IMAD.WIDE.U32 R6, R4, R5, c[0x0][0x160] ; /* 0x0000580004061625 */
/* 0x000fc800078e0005 */
/*0220*/ @P2 IMAD.IADD R13, R8, 0x1, R15 ; /* 0x00000001080d2824 */
/* 0x002fca00078e020f */
/*0230*/ @P2 STG.E [R2.64], R13 ; /* 0x0000000d02002986 */
/* 0x0001e8000c101906 */
/*0240*/ @P1 LDG.E R6, [R6.64] ; /* 0x0000000606061981 */
/* 0x000ea8000c1e1900 */
/*0250*/ @!P1 LDG.E R5, [R2.64] ; /* 0x0000000602059981 */
/* 0x000168000c1e1900 */
/*0260*/ @P1 LDG.E R15, [R2.64] ; /* 0x00000006020f1981 */
/* 0x000ea2000c1e1900 */
/*0270*/ ISETP.NE.AND P6, PT, R10, 0x1f, PT ; /* 0x0000001f0a00780c */
/* 0x000fe20003fc5270 */
/*0280*/ @P1 IMAD.IADD R5, R6, 0x1, R15 ; /* 0x0000000106051824 */
/* 0x004fca00078e020f */
/*0290*/ @P1 STG.E [R2.64], R5 ; /* 0x0000000502001986 */
/* 0x0001e8000c101906 */
/*02a0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*02b0*/ @!P6 LDG.E R9, [R2.64] ; /* 0x000000060209e981 */
/* 0x000ea2000c1e1900 */
/*02c0*/ P2R R4, PR, RZ, 0x40 ; /* 0x00000040ff047803 */
/* 0x000fc40000000000 */
/*02d0*/ SHF.R.U32.HI R4, RZ, 0x5, R0 ; /* 0x00000005ff047819 */
/* 0x000fc80000011600 */
/*02e0*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003f05270 */
/*02f0*/ S2UR UR5, SR_CTAID.X ; /* 0x00000000000579c3 */
/* 0x000e620000002500 */
/*0300*/ BSSY B0, 0x4b0 ; /* 0x000001a000007945 */
/* 0x000fe40003800000 */
/*0310*/ P2R R6, PR, RZ, 0x1 ; /* 0x00000001ff067803 */
/* 0x000fe20000000000 */
/*0320*/ @!P6 STS [R4.X4], R9 ; /* 0x000000090400e388 */
/* 0x0041e80000004800 */
/*0330*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0340*/ @P0 BRA 0x4a0 ; /* 0x0000015000000947 */
/* 0x000fea0003800000 */
/*0350*/ @P5 LDS R6, [R0.X4] ; /* 0x0000000000065984 */
/* 0x003fe20000004800 */
/*0360*/ ISETP.GE.U32.AND P0, PT, R10, 0x10, PT ; /* 0x000000100a00780c */
/* 0x000fc60003f06070 */
/*0370*/ @P5 LDS R7, [R0.X4+-0x4] ; /* 0xfffffc0000075984 */
/* 0x000e240000004800 */
/*0380*/ @P5 IMAD.IADD R7, R6, 0x1, R7 ; /* 0x0000000106075824 */
/* 0x001fe400078e0207 */
/*0390*/ @P4 LDS R6, [R0.X4+-0x8] ; /* 0xfffff80000064984 */
/* 0x000fe80000004800 */
/*03a0*/ @P5 STS [R0.X4], R7 ; /* 0x0000000700005388 */
/* 0x000fe80000004800 */
/*03b0*/ @P4 LDS R9, [R0.X4] ; /* 0x0000000000094984 */
/* 0x000e240000004800 */
/*03c0*/ @P4 IADD3 R9, R6, R9, RZ ; /* 0x0000000906094210 */
/* 0x001fc40007ffe0ff */
/*03d0*/ @P3 LDS R6, [R0.X4+-0x10] ; /* 0xfffff00000063984 */
/* 0x000fe80000004800 */
/*03e0*/ @P4 STS [R0.X4], R9 ; /* 0x0000000900004388 */
/* 0x000fe80000004800 */
/*03f0*/ @P3 LDS R11, [R0.X4] ; /* 0x00000000000b3984 */
/* 0x000e240000004800 */
/*0400*/ @P3 IMAD.IADD R11, R6, 0x1, R11 ; /* 0x00000001060b3824 */
/* 0x001fc400078e020b */
/*0410*/ @P2 LDS R6, [R0.X4+-0x20] ; /* 0xffffe00000062984 */
/* 0x000fe80000004800 */
/*0420*/ @P3 STS [R0.X4], R11 ; /* 0x0000000b00003388 */
/* 0x000fe80000004800 */
/*0430*/ @P2 LDS R13, [R0.X4] ; /* 0x00000000000d2984 */
/* 0x000e240000004800 */
/*0440*/ @P2 IMAD.IADD R13, R6, 0x1, R13 ; /* 0x00000001060d2824 */
/* 0x001fc400078e020d */
/*0450*/ @P0 LDS R6, [R0.X4+-0x40] ; /* 0xffffc00000060984 */
/* 0x000fe80000004800 */
/*0460*/ @P2 STS [R0.X4], R13 ; /* 0x0000000d00002388 */
/* 0x000fe80000004800 */
/*0470*/ @P0 LDS R7, [R0.X4] ; /* 0x0000000000070984 */
/* 0x000e240000004800 */
/*0480*/ @P0 IMAD.IADD R7, R6, 0x1, R7 ; /* 0x0000000106070824 */
/* 0x001fca00078e0207 */
/*0490*/ @P0 STS [R0.X4], R7 ; /* 0x0000000700000388 */
/* 0x0001e40000004800 */
/*04a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x003fea0003800000 */
/*04b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*04c0*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003f05270 */
/*04d0*/ ULEA UR4, UR5, 0x80, 0x2 ; /* 0x0000008005047891 */
/* 0x000fc8000f8e103f */
/*04e0*/ BSSY B0, 0x5b0 ; /* 0x000000c000007945 */
/* 0x000ff00003800000 */
/*04f0*/ @P0 LDS R6, [R4.X4+-0x4] ; /* 0xfffffc0004060984 */
/* 0x000e240000004800 */
/*0500*/ @P0 IMAD.IADD R5, R5, 0x1, R6 ; /* 0x0000000105050824 */
/* 0x021fc400078e0206 */
/*0510*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0520*/ LOP3.LUT R6, R0, c[0x0][0x0], RZ, 0xc0, !PT ; /* 0x0000000000067a12 */
/* 0x000fc800078ec0ff */
/*0530*/ ISETP.NE.AND P6, PT, R6, c[0x0][0x0], PT ; /* 0x0000000006007a0c */
/* 0x000fe20003fc5270 */
/*0540*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x0001e8000c101906 */
/*0550*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0560*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0570*/ @P6 BRA 0x5a0 ; /* 0x0000002000006947 */
/* 0x000fea0003800000 */
/*0580*/ LDG.E R6, [R2.64] ; /* 0x0000000602067981 */
/* 0x001ea8000c1e1900 */
/*0590*/ STS [UR4], R6 ; /* 0x00000006ff007988 */
/* 0x0041e40008000804 */
/*05a0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x001fea0003800000 */
/*05b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*05c0*/ PLOP3.LUT P6, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fc80003fce170 */
/*05d0*/ P2R R6, PR, RZ, 0x40 ; /* 0x00000040ff067803 */
/* 0x000fe40000000000 */
/*05e0*/ ISETP.NE.AND P6, PT, RZ, UR5, PT ; /* 0x00000005ff007c0c */
/* 0x000fda000bfc5270 */
/*05f0*/ @P6 BRA 0x9c0 ; /* 0x000003c000006947 */
/* 0x000fea0003800000 */
/*0600*/ @P5 LDS R6, [R0.X4+0x80] ; /* 0x0000800000065984 */
/* 0x000fe20000004800 */
/*0610*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*0620*/ ISETP.NE.AND P6, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003fc5270 */
/*0630*/ BSSY B0, 0x940 ; /* 0x0000030000007945 */
/* 0x000fe20003800000 */
/*0640*/ @P5 LDS R7, [R0.X4+0x7c] ; /* 0x00007c0000075984 */
/* 0x000e240000004800 */
/*0650*/ @P5 IMAD.IADD R7, R6, 0x1, R7 ; /* 0x0000000106075824 */
/* 0x001fe400078e0207 */
/*0660*/ @P4 LDS R6, [R0.X4+0x78] ; /* 0x0000780000064984 */
/* 0x000fe80000004800 */
/*0670*/ @P5 STS [R0.X4+0x80], R7 ; /* 0x0000800700005388 */
/* 0x000fe80000004800 */
/*0680*/ @P4 LDS R9, [R0.X4+0x80] ; /* 0x0000800000094984 */
/* 0x000e240000004800 */
/*0690*/ @P4 IADD3 R9, R6, R9, RZ ; /* 0x0000000906094210 */
/* 0x001fc40007ffe0ff */
/*06a0*/ @P3 LDS R6, [R0.X4+0x70] ; /* 0x0000700000063984 */
/* 0x000fe80000004800 */
/*06b0*/ @P4 STS [R0.X4+0x80], R9 ; /* 0x0000800900004388 */
/* 0x000fe80000004800 */
/*06c0*/ @P3 LDS R11, [R0.X4+0x80] ; /* 0x00008000000b3984 */
/* 0x000e240000004800 */
/*06d0*/ @P3 IMAD.IADD R11, R6, 0x1, R11 ; /* 0x00000001060b3824 */
/* 0x001fc400078e020b */
/*06e0*/ @P2 LDS R6, [R0.X4+0x60] ; /* 0x0000600000062984 */
/* 0x000fe80000004800 */
/*06f0*/ @P3 STS [R0.X4+0x80], R11 ; /* 0x0000800b00003388 */
/* 0x000fe80000004800 */
/*0700*/ @P2 LDS R13, [R0.X4+0x80] ; /* 0x00008000000d2984 */
/* 0x000e240000004800 */
/*0710*/ @P2 IMAD.IADD R13, R6, 0x1, R13 ; /* 0x00000001060d2824 */
/* 0x001fc400078e020d */
/*0720*/ @P1 LDS R6, [R0.X4+0x40] ; /* 0x0000400000061984 */
/* 0x000fe80000004800 */
/*0730*/ @P2 STS [R0.X4+0x80], R13 ; /* 0x0000800d00002388 */
/* 0x000fe80000004800 */
/*0740*/ @!P1 LDS R7, [R0.X4+0x80] ; /* 0x0000800000079984 */
/* 0x000fe80000004800 */
/*0750*/ @P1 LDS R15, [R0.X4+0x80] ; /* 0x00008000000f1984 */
/* 0x000e240000004800 */
/*0760*/ @P1 IMAD.IADD R7, R6, 0x1, R15 ; /* 0x0000000106071824 */
/* 0x001fca00078e020f */
/*0770*/ @P1 STS [R0.X4+0x80], R7 ; /* 0x0000800700001388 */
/* 0x000fe80000004800 */
/*0780*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0790*/ ISETP.NE.AND P1, PT, R10, 0x1f, PT ; /* 0x0000001f0a00780c */
/* 0x000fda0003f25270 */
/*07a0*/ @!P1 LDS R9, [R0.X4+0x80] ; /* 0x0000800000099984 */
/* 0x000e280000004800 */
/*07b0*/ @!P1 STS [R4.X4], R9 ; /* 0x0000000904009388 */
/* 0x0011e80000004800 */
/*07c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*07d0*/ @P6 BRA 0x930 ; /* 0x0000015000006947 */
/* 0x000fea0003800000 */
/*07e0*/ @P5 LDS R6, [R0.X4] ; /* 0x0000000000065984 */
/* 0x001fe20000004800 */
/*07f0*/ ISETP.GE.U32.AND P1, PT, R10, 0x10, PT ; /* 0x000000100a00780c */
/* 0x000fc60003f26070 */
/*0800*/ @P5 LDS R9, [R0.X4+-0x4] ; /* 0xfffffc0000095984 */
/* 0x000e240000004800 */
/*0810*/ @P5 IMAD.IADD R9, R6, 0x1, R9 ; /* 0x0000000106095824 */
/* 0x001fe400078e0209 */
/*0820*/ @P4 LDS R6, [R0.X4+-0x8] ; /* 0xfffff80000064984 */
/* 0x000fe80000004800 */
/*0830*/ @P5 STS [R0.X4], R9 ; /* 0x0000000900005388 */
/* 0x000fe80000004800 */
/*0840*/ @P4 LDS R11, [R0.X4] ; /* 0x00000000000b4984 */
/* 0x000e240000004800 */
/*0850*/ @P4 IMAD.IADD R11, R6, 0x1, R11 ; /* 0x00000001060b4824 */
/* 0x001fc400078e020b */
/*0860*/ @P3 LDS R6, [R0.X4+-0x10] ; /* 0xfffff00000063984 */
/* 0x000fe80000004800 */
/*0870*/ @P4 STS [R0.X4], R11 ; /* 0x0000000b00004388 */
/* 0x000fe80000004800 */
/*0880*/ @P3 LDS R13, [R0.X4] ; /* 0x00000000000d3984 */
/* 0x000e240000004800 */
/*0890*/ @P3 IADD3 R13, R6, R13, RZ ; /* 0x0000000d060d3210 */
/* 0x001fc40007ffe0ff */
/*08a0*/ @P2 LDS R6, [R0.X4+-0x20] ; /* 0xffffe00000062984 */
/* 0x000fe80000004800 */
/*08b0*/ @P3 STS [R0.X4], R13 ; /* 0x0000000d00003388 */
/* 0x000fe80000004800 */
/*08c0*/ @P2 LDS R15, [R0.X4] ; /* 0x00000000000f2984 */
/* 0x000e240000004800 */
/*08d0*/ @P2 IMAD.IADD R9, R6, 0x1, R15 ; /* 0x0000000106092824 */
/* 0x001fc400078e020f */
/*08e0*/ @P1 LDS R6, [R0.X4+-0x40] ; /* 0xffffc00000061984 */
/* 0x000fe80000004800 */
/*08f0*/ @P2 STS [R0.X4], R9 ; /* 0x0000000900002388 */
/* 0x000fe80000004800 */
/*0900*/ @P1 LDS R15, [R0.X4] ; /* 0x00000000000f1984 */
/* 0x000e240000004800 */
/*0910*/ @P1 IMAD.IADD R11, R6, 0x1, R15 ; /* 0x00000001060b1824 */
/* 0x001fca00078e020f */
/*0920*/ @P1 STS [R0.X4], R11 ; /* 0x0000000b00001388 */
/* 0x0001e40000004800 */
/*0930*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x001fea0003800000 */
/*0940*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0950*/ @P0 LDS R4, [R4.X4+-0x4] ; /* 0xfffffc0004040984 */
/* 0x000e240000004800 */
/*0960*/ @P0 IMAD.IADD R7, R7, 0x1, R4 ; /* 0x0000000107070824 */
/* 0x001fe400078e0204 */
/*0970*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0980*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fc80003f0f070 */
/*0990*/ P2R R6, PR, RZ, 0x1 ; /* 0x00000001ff067803 */
/* 0x000fe20000000000 */
/*09a0*/ STS [R0.X4+0x80], R7 ; /* 0x0000800700007388 */
/* 0x0001e80000004800 */
/*09b0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*09c0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*09d0*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fda0003f05270 */
/*09e0*/ @!P0 LDS R0, [UR4+-0x4] ; /* 0xfffffc04ff008984 */
/* 0x001e240008000800 */
/*09f0*/ @!P0 IMAD.IADD R5, R5, 0x1, R0 ; /* 0x0000000105058824 */
/* 0x001fe400078e0200 */
/*0a00*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0a10*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe8000c101906 */
/*0a20*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0a30*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0a40*/ BRA 0xa40; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0a50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0aa0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ab0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ac0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ad0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ae0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0af0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11scan_globalPi
.globl _Z11scan_globalPi
.p2align 8
.type _Z11scan_globalPi,@function
_Z11scan_globalPi:
s_clause 0x1
s_load_b64 s[6:7], s[0:1], 0x0
s_load_b32 s8, s[0:1], 0x14
v_and_b32_e32 v5, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_ne_u32_e64 s0, 0, v5
s_and_saveexec_b32 s1, s0
s_cbranch_execz .LBB0_2
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v1, -1, v0
v_lshlrev_b32_e32 v3, 2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
s_clause 0x1
global_load_b32 v1, v[1:2], off
global_load_b32 v2, v3, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v3, v1, s[6:7]
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s1
v_cmp_lt_u32_e64 s1, 1, v5
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s2, s1
s_cbranch_execz .LBB0_4
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v1, -2, v0
v_lshlrev_b32_e32 v3, 2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
s_clause 0x1
global_load_b32 v1, v[1:2], off
global_load_b32 v2, v3, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v3, v1, s[6:7]
.LBB0_4:
s_or_b32 exec_lo, exec_lo, s2
v_cmp_lt_u32_e64 s2, 3, v5
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_6
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v1, -4, v0
v_lshlrev_b32_e32 v3, 2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
s_clause 0x1
global_load_b32 v1, v[1:2], off
global_load_b32 v2, v3, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v3, v1, s[6:7]
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s3
v_cmp_lt_u32_e64 s3, 7, v5
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s4, s3
s_cbranch_execz .LBB0_8
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v1, -8, v0
v_lshlrev_b32_e32 v3, 2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
s_clause 0x1
global_load_b32 v1, v[1:2], off
global_load_b32 v2, v3, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v3, v1, s[6:7]
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s4
v_cmp_lt_u32_e64 s4, 15, v5
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s5, s4
s_cbranch_execz .LBB0_10
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v1, -16, v0
v_lshlrev_b32_e32 v3, 2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
s_clause 0x1
global_load_b32 v1, v[1:2], off
global_load_b32 v2, v3, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v3, v1, s[6:7]
.LBB0_10:
s_or_b32 exec_lo, exec_lo, s5
v_lshlrev_b32_e32 v1, 2, v0
v_lshrrev_b32_e32 v4, 5, v0
s_waitcnt lgkmcnt(0)
global_load_b32 v3, v1, s[6:7]
v_add_co_u32 v1, s5, s6, v1
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v2, null, s7, 0, s5
v_cmp_eq_u32_e64 s6, 31, v5
s_waitcnt vmcnt(0)
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s5, s6
s_cbranch_execz .LBB0_12
global_load_b32 v5, v[1:2], off
v_lshlrev_b32_e32 v6, 2, v4
s_waitcnt vmcnt(0)
ds_store_b32 v6, v5
.LBB0_12:
s_or_b32 exec_lo, exec_lo, s5
v_cmp_lt_u32_e64 s5, 31, v0
v_cmp_gt_u32_e64 s7, 32, v0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s9, s7
s_cbranch_execz .LBB0_23
s_and_saveexec_b32 s10, s0
s_cbranch_execz .LBB0_15
v_lshlrev_b32_e32 v5, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v6, -4, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_15:
s_or_b32 exec_lo, exec_lo, s10
s_and_saveexec_b32 s10, s1
s_cbranch_execz .LBB0_17
v_lshlrev_b32_e32 v5, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v6, -8, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_17:
s_or_b32 exec_lo, exec_lo, s10
s_and_saveexec_b32 s10, s2
s_cbranch_execz .LBB0_19
v_lshlrev_b32_e32 v5, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v6, -16, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_19:
s_or_b32 exec_lo, exec_lo, s10
s_and_saveexec_b32 s10, s3
s_cbranch_execz .LBB0_21
v_lshlrev_b32_e32 v5, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_subrev_nc_u32_e32 v6, 32, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_21:
s_or_b32 exec_lo, exec_lo, s10
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s4
s_cbranch_execz .LBB0_23
v_lshlrev_b32_e32 v5, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_subrev_nc_u32_e32 v6, 64, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_23:
s_or_b32 exec_lo, exec_lo, s9
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s9, s5
s_cbranch_execz .LBB0_25
v_lshl_add_u32 v5, v4, 2, -4
ds_load_b32 v5, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v3, v5, v3
.LBB0_25:
s_or_b32 exec_lo, exec_lo, s9
s_and_b32 s8, 0xffff, s8
s_delay_alu instid0(SALU_CYCLE_1)
v_and_b32_e32 v5, s8, v0
s_barrier
buffer_gl0_inv
global_store_b32 v[1:2], v3, off
s_waitcnt_vscnt null, 0x0
v_cmp_eq_u32_e32 vcc_lo, s8, v5
s_barrier
buffer_gl0_inv
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s8, vcc_lo
s_cbranch_execz .LBB0_27
global_load_b32 v5, v[1:2], off
s_lshl_b32 s9, s15, 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_addk_i32 s9, 0x80
v_mov_b32_e32 v6, s9
s_waitcnt vmcnt(0)
ds_store_b32 v6, v5
.LBB0_27:
s_or_b32 exec_lo, exec_lo, s8
s_cmp_lg_u32 s15, 0
s_waitcnt lgkmcnt(0)
s_cselect_b32 s8, -1, 0
s_barrier
s_and_b32 vcc_lo, exec_lo, s8
buffer_gl0_inv
s_cbranch_vccnz .LBB0_54
s_and_saveexec_b32 s9, s0
s_cbranch_execz .LBB0_30
v_lshl_add_u32 v5, v0, 2, 0x80
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v6, -4, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_30:
s_or_b32 exec_lo, exec_lo, s9
s_and_saveexec_b32 s9, s1
s_cbranch_execz .LBB0_32
v_lshl_add_u32 v5, v0, 2, 0x80
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v6, -8, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_32:
s_or_b32 exec_lo, exec_lo, s9
s_and_saveexec_b32 s9, s2
s_cbranch_execz .LBB0_34
v_lshl_add_u32 v5, v0, 2, 0x80
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v6, -16, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_34:
s_or_b32 exec_lo, exec_lo, s9
s_and_saveexec_b32 s9, s3
s_cbranch_execz .LBB0_36
v_lshl_add_u32 v5, v0, 2, 0x80
s_delay_alu instid0(VALU_DEP_1)
v_subrev_nc_u32_e32 v6, 32, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_36:
s_or_b32 exec_lo, exec_lo, s9
s_and_saveexec_b32 s9, s4
s_cbranch_execz .LBB0_38
v_lshl_add_u32 v5, v0, 2, 0x80
s_delay_alu instid0(VALU_DEP_1)
v_subrev_nc_u32_e32 v6, 64, v5
ds_load_b32 v6, v6
ds_load_b32 v7, v5
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v7, v6
ds_store_b32 v5, v6
.LBB0_38:
s_or_b32 exec_lo, exec_lo, s9
v_lshl_add_u32 v5, v0, 2, 0x80
ds_load_b32 v6, v5
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s9, s6
s_cbranch_execz .LBB0_40
ds_load_b32 v7, v5
v_lshlrev_b32_e32 v8, 2, v4
s_waitcnt lgkmcnt(0)
ds_store_b32 v8, v7
.LBB0_40:
s_or_b32 exec_lo, exec_lo, s9
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s6, s7
s_cbranch_execz .LBB0_51
s_and_saveexec_b32 s7, s0
s_cbranch_execz .LBB0_43
v_lshlrev_b32_e32 v7, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v8, -4, v7
ds_load_b32 v8, v8
ds_load_b32 v9, v7
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v8, v9, v8
ds_store_b32 v7, v8
.LBB0_43:
s_or_b32 exec_lo, exec_lo, s7
s_and_saveexec_b32 s0, s1
s_cbranch_execz .LBB0_45
v_lshlrev_b32_e32 v7, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v8, -8, v7
ds_load_b32 v8, v8
ds_load_b32 v9, v7
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v8, v9, v8
ds_store_b32 v7, v8
.LBB0_45:
s_or_b32 exec_lo, exec_lo, s0
s_and_saveexec_b32 s0, s2
s_cbranch_execz .LBB0_47
v_lshlrev_b32_e32 v7, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v8, -16, v7
ds_load_b32 v8, v8
ds_load_b32 v9, v7
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v8, v9, v8
ds_store_b32 v7, v8
.LBB0_47:
s_or_b32 exec_lo, exec_lo, s0
s_and_saveexec_b32 s0, s3
s_cbranch_execz .LBB0_49
v_lshlrev_b32_e32 v7, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_subrev_nc_u32_e32 v8, 32, v7
ds_load_b32 v8, v8
ds_load_b32 v9, v7
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v8, v9, v8
ds_store_b32 v7, v8
.LBB0_49:
s_or_b32 exec_lo, exec_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s4
s_cbranch_execz .LBB0_51
v_lshlrev_b32_e32 v0, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_subrev_nc_u32_e32 v7, 64, v0
ds_load_b32 v7, v7
ds_load_b32 v8, v0
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v7, v8, v7
ds_store_b32 v0, v7
.LBB0_51:
s_or_b32 exec_lo, exec_lo, s6
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s0, s5
s_cbranch_execz .LBB0_53
v_lshl_add_u32 v0, v4, 2, -4
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v6, v0, v6
.LBB0_53:
s_or_b32 exec_lo, exec_lo, s0
s_barrier
buffer_gl0_inv
ds_store_b32 v5, v6
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
.LBB0_54:
s_and_not1_b32 vcc_lo, exec_lo, s8
s_barrier
buffer_gl0_inv
s_cbranch_vccnz .LBB0_56
s_lshl_b32 s0, s15, 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_addk_i32 s0, 0x80
s_add_i32 s0, s0, -4
s_delay_alu instid0(SALU_CYCLE_1)
v_mov_b32_e32 v0, s0
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v3, v0, v3
.LBB0_56:
s_barrier
buffer_gl0_inv
global_store_b32 v[1:2], v3, off
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11scan_globalPi
.amdhsa_group_segment_fixed_size 128
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11scan_globalPi, .Lfunc_end0-_Z11scan_globalPi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
- .offset: 128
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 128
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11scan_globalPi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11scan_globalPi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00142c59_00000000-6_Scan.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9scan_warpPij
.type _Z9scan_warpPij, @function
_Z9scan_warpPij:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z9scan_warpPij, .-_Z9scan_warpPij
.globl _Z10scan_blockPij
.type _Z10scan_blockPij, @function
_Z10scan_blockPij:
.LFB2058:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2058:
.size _Z10scan_blockPij, .-_Z10scan_blockPij
.globl _Z31__device_stub__Z11scan_globalPiPi
.type _Z31__device_stub__Z11scan_globalPiPi, @function
_Z31__device_stub__Z11scan_globalPiPi:
.LFB2084:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z11scan_globalPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z31__device_stub__Z11scan_globalPiPi, .-_Z31__device_stub__Z11scan_globalPiPi
.globl _Z11scan_globalPi
.type _Z11scan_globalPi, @function
_Z11scan_globalPi:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z11scan_globalPiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z11scan_globalPi, .-_Z11scan_globalPi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "/home/ubuntu/Datasets/stackv2/train-structured/wakim/prog-gpu-cuda/master/Scan/src/Scan.cu"
.align 8
.LC1:
.string "Error %s at line %d in file %s\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "Erro !!!"
.section .rodata.str1.8
.align 8
.LC3:
.string "Input value: %u, device output: %u\n"
.text
.globl main
.type main, @function
main:
.LFB2059:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $312, %rsp
.cfi_def_cfa_offset 368
movq %fs:40, %rax
movq %rax, 296(%rsp)
xorl %eax, %eax
movq $0, (%rsp)
.L16:
movl %eax, 32(%rsp,%rax,4)
addq $1, %rax
cmpq $32, %rax
jne .L16
movq %rsp, %rdi
movl $128, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L32
leaq 32(%rsp), %rsi
movl $1, %ecx
movl $128, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L33
movl $16, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $2, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $64, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L34
.L19:
call cudaDeviceSynchronize@PLT
testl %eax, %eax
jne .L35
call cudaGetLastError@PLT
testl %eax, %eax
jne .L36
leaq 160(%rsp), %rdi
movl $2, %ecx
movl $128, %edx
movq (%rsp), %rsi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L37
movl 32(%rsp), %r12d
leaq 160(%rsp), %rbx
leaq 36(%rsp), %rbp
leaq 284(%rsp), %r15
jmp .L24
.L32:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC0(%rip), %r9
movl $111, %r8d
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L33:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC0(%rip), %r9
movl $112, %r8d
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L34:
movq (%rsp), %rdi
call _Z31__device_stub__Z11scan_globalPiPi
jmp .L19
.L35:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC0(%rip), %r9
movl $117, %r8d
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L36:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC0(%rip), %r9
movl $118, %r8d
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L37:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC0(%rip), %r9
movl $119, %r8d
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L23:
movl %r12d, %r13d
addl 0(%rbp), %r13d
movl %r13d, %r12d
movl (%r14), %ecx
movl -4(%rbp), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
addq $4, %rbp
cmpq %r15, %rbx
je .L38
.L24:
movq %rbx, %r14
cmpl %r12d, (%rbx)
je .L23
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L23
.L38:
cmpl %r13d, 284(%rsp)
jne .L39
.L25:
movl 284(%rsp), %ecx
movl 156(%rsp), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq (%rsp), %rdi
call cudaFree@PLT
testl %eax, %eax
jne .L40
call cudaDeviceReset@PLT
testl %eax, %eax
jne .L41
movq 296(%rsp), %rax
subq %fs:40, %rax
jne .L42
movl $0, %eax
addq $312, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L25
.L40:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC0(%rip), %r9
movl $137, %r8d
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L41:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC0(%rip), %r9
movl $138, %r8d
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size main, .-main
.section .rodata.str1.1
.LC4:
.string "_Z11scan_globalPi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z11scan_globalPi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "Scan.hip"
.globl _Z26__device_stub__scan_globalPi # -- Begin function _Z26__device_stub__scan_globalPi
.p2align 4, 0x90
.type _Z26__device_stub__scan_globalPi,@function
_Z26__device_stub__scan_globalPi: # @_Z26__device_stub__scan_globalPi
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z11scan_globalPi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z26__device_stub__scan_globalPi, .Lfunc_end0-_Z26__device_stub__scan_globalPi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $320, %rsp # imm = 0x140
.cfi_def_cfa_offset 352
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq $0, 8(%rsp)
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %eax, 192(%rsp,%rax,4)
incq %rax
cmpq $32, %rax
jne .LBB1_1
# %bb.2:
leaq 8(%rsp), %rdi
movl $128, %esi
callq hipMalloc
testl %eax, %eax
jne .LBB1_3
# %bb.5:
movq 8(%rsp), %rdi
leaq 192(%rsp), %rsi
movl $128, %edx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_6
# %bb.7:
movabsq $4294967298, %rdi # imm = 0x100000002
leaq 14(%rdi), %rdx
movl $64, %r8d
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_9
# %bb.8:
movq 8(%rsp), %rax
movq %rax, 56(%rsp)
leaq 56(%rsp), %rax
movq %rax, 16(%rsp)
leaq 64(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z11scan_globalPi, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_9:
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB1_10
# %bb.11:
callq hipGetLastError
testl %eax, %eax
jne .LBB1_12
# %bb.13:
movq 8(%rsp), %rsi
leaq 64(%rsp), %rdi
movl $128, %edx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_14
# %bb.15:
movl 192(%rsp), %ebx
xorl %r14d, %r14d
jmp .LBB1_16
.p2align 4, 0x90
.LBB1_18: # in Loop: Header=BB1_16 Depth=1
leaq 1(%r14), %r15
addl 196(%rsp,%r14,4), %ebx
movl 192(%rsp,%r14,4), %esi
movl 64(%rsp,%r14,4), %edx
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movq %r15, %r14
cmpq $31, %r15
je .LBB1_19
.LBB1_16: # =>This Inner Loop Header: Depth=1
cmpl %ebx, 64(%rsp,%r14,4)
je .LBB1_18
# %bb.17: # in Loop: Header=BB1_16 Depth=1
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
jmp .LBB1_18
.LBB1_19:
cmpl %ebx, 188(%rsp)
je .LBB1_21
# %bb.20:
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
.LBB1_21:
movl 316(%rsp), %esi
movl 188(%rsp), %edx
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movq 8(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB1_22
# %bb.23:
callq hipDeviceReset
testl %eax, %eax
jne .LBB1_24
# %bb.25:
xorl %eax, %eax
addq $320, %rsp # imm = 0x140
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB1_3:
.cfi_def_cfa_offset 352
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $113, %ecx
jmp .LBB1_4
.LBB1_6:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $115, %ecx
jmp .LBB1_4
.LBB1_10:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $119, %ecx
jmp .LBB1_4
.LBB1_12:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $120, %ecx
jmp .LBB1_4
.LBB1_14:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $121, %ecx
jmp .LBB1_4
.LBB1_22:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $139, %ecx
jmp .LBB1_4
.LBB1_24:
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %r8d
movq %rbx, %rdi
movq %rax, %rdx
movl $140, %ecx
.LBB1_4:
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11scan_globalPi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z11scan_globalPi,@object # @_Z11scan_globalPi
.section .rodata,"a",@progbits
.globl _Z11scan_globalPi
.p2align 3, 0x0
_Z11scan_globalPi:
.quad _Z26__device_stub__scan_globalPi
.size _Z11scan_globalPi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error %s at line %d in file %s\n"
.size .L.str, 32
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/wakim/prog-gpu-cuda/master/Scan/src/Scan.hip"
.size .L.str.1, 102
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Erro !!!"
.size .L.str.2, 9
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Input value: %u, device output: %u\n"
.size .L.str.3, 36
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11scan_globalPi"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__scan_globalPi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11scan_globalPi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <cufft.h>
const int THREADS=64;
__global__ void kernel_divideStable2(float *data1, float *data2, float threadshold, int len);
__global__ void kernel_multiply_complex(float *data1, float *data2, int len);
__global__ void kernel_multiply_real(float *data1, float *data2, int len);
__global__ void kernel_multiplyConj(float *data1, float *data2, int len);
__global__ void kernel_copy(float *data1, float *data2, int len);
using namespace std;
// In-place real-to-complex forward FFT via cuFFT.
// `data` is device memory holding the real input; the complex spectrum is
// written back over it. nz == 1 selects a 2-D transform, otherwise 3-D.
void inv_forwardDFT(float *data, int nx, int ny, int nz){
    cufftReal    *in  = (cufftReal *)data;     // real-valued input view
    cufftComplex *out = (cufftComplex *)data;  // complex output view (in place)
    cufftHandle plan_forward;
    if (nz != 1) {
        cufftPlan3d(&plan_forward, nx, ny, nz, CUFFT_R2C);
        cufftExecR2C(plan_forward, in, out);
    } else {
        cufftPlan2d(&plan_forward, nx, ny, CUFFT_R2C);
        cufftExecR2C(plan_forward, in, out);
    }
    cufftDestroy(plan_forward);
}
// In-place complex-to-real inverse FFT via cuFFT (unnormalized, as is
// cuFFT's convention). nz == 1 selects a 2-D transform, otherwise 3-D.
void inv_inverseDFT(float *data, int nx, int ny, int nz){
    cufftReal    *out = (cufftReal *)data;     // real output view (in place)
    cufftComplex *in  = (cufftComplex *)data;  // complex spectrum input view
    cufftHandle plan_backward;
    if (nz != 1) {
        cufftPlan3d(&plan_backward, nx, ny, nz, CUFFT_C2R);
        cufftExecC2R(plan_backward, in, out);
    } else {
        cufftPlan2d(&plan_backward, nx, ny, CUFFT_C2R);
        cufftExecC2R(plan_backward, in, out);
    }
    cufftDestroy(plan_backward);
}
// Host launcher for kernel_divideStable2: one thread per element,
// THREADS (64) threads per block, enough blocks to cover `len`.
void inv_divideStable2(float *data1, float *data2, float threadshold, int len){
    unsigned long int blocks = (len + THREADS - 1) / THREADS;  // ceil(len / THREADS)
    kernel_divideStable2<<<blocks, THREADS>>>(data1, data2, threadshold, len);
}
// Stabilized element-wise division, result overwriting data2:
//   data2[i] = data1[i] / data2[i]  when data2[i] >= threadshold
//   data2[i] = 0                    otherwise (suppresses tiny denominators)
__global__ void kernel_divideStable2(float *data1, float *data2, float threadshold, int len){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < len) {
        float denom = data2[idx];
        data2[idx] = (denom < threadshold) ? 0.0f : data1[idx] / denom;
    }
}
// Host launcher for kernel_multiply_complex. `len` counts floats; the
// kernel works on len/2 interleaved (re, im) complex pairs.
void inv_multiply_complex(float * data1, float *data2, int len){
    int pairs = len / 2;
    unsigned long int blocks = (pairs + THREADS - 1) / THREADS;  // ceil(pairs / THREADS)
    kernel_multiply_complex<<<blocks, THREADS>>>(data1, data2, pairs);
}
// In-place complex multiply over interleaved (re, im) pairs:
//   data1[k] *= data2[k]   (one thread per complex element, k < len)
__global__ void kernel_multiply_complex(float *data1, float *data2, int len){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= len) return;
    int re = 2 * idx;
    int im = 2 * idx + 1;
    float a = data1[re], b = data1[im];  // first operand  a + bi
    float c = data2[re], d = data2[im];  // second operand c + di
    data1[re] = a * c - b * d;           // Re((a+bi)(c+di))
    data1[im] = a * d + b * c;           // Im((a+bi)(c+di))
}
// Host launcher for kernel_multiply_real: element-wise product of two
// real device arrays of `len` floats, accumulated into data1.
void inv_multiply_real(float *data1, float *data2, int len){
    unsigned long int blocks = (len + THREADS - 1) / THREADS;  // ceil(len / THREADS)
    kernel_multiply_real<<<blocks, THREADS>>>(data1, data2, len);
}
// Element-wise real multiply in place: data1[i] *= data2[i] for i < len.
__global__ void kernel_multiply_real(float *data1, float *data2, int len){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < len)
        data1[idx] = data1[idx] * data2[idx];
}
// Host launcher for kernel_multiplyConj. `len` counts floats; the kernel
// works on len/2 interleaved (re, im) complex pairs.
void inv_multiplyConj(float *data1, float *data2, int len){
    int pairs = len / 2;
    unsigned long int blocks = (pairs + THREADS - 1) / THREADS;  // ceil(pairs / THREADS)
    kernel_multiplyConj<<<blocks, THREADS>>>(data1, data2, pairs);
}
// In-place multiply by the complex conjugate over interleaved (re, im)
// pairs:  data1[k] *= conj(data2[k])  (one thread per complex element).
__global__ void kernel_multiplyConj(float *data1, float *data2, int len){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= len) return;
    int re = 2 * idx;
    int im = 2 * idx + 1;
    float a = data1[re], b = data1[im];  // first operand  a + bi
    float c = data2[re], d = data2[im];  // second operand c + di
    data1[re] = a * c + b * d;           // Re((a+bi) * conj(c+di))
    data1[im] = -a * d + b * c;          // Im((a+bi) * conj(c+di))
}
// Host launcher for kernel_copy: device-side copy of `len` floats
// from data2 into data1.
void inv_copy(float *data1, float *data2, int len){
    unsigned long int blocks = (len + THREADS - 1) / THREADS;  // ceil(len / THREADS)
    kernel_copy<<<blocks, THREADS>>>(data1, data2, len);
}
// Element-wise copy: data1[i] = data2[i] for i < len.
__global__ void kernel_copy(float *data1, float *data2, int len){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < len)
        data1[idx] = data2[idx];
}
Function : _Z11kernel_copyPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fcc00078e0205 */
/*0090*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fca00078e0205 */
/*00b0*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x004fe2000c101904 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z19kernel_multiplyConjPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0070*/ SHF.L.U32 R0, R0, 0x1, RZ ; /* 0x0000000100007819 */
/* 0x000fe200000006ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*0090*/ IMAD.WIDE R4, R0, R3, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0203 */
/*00a0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fe200078e0203 */
/*00b0*/ LDG.E R6, [R4.64+0x4] ; /* 0x0000040404067981 */
/* 0x000ea8000c1e1900 */
/*00c0*/ LDG.E R9, [R2.64+0x4] ; /* 0x0000040402097981 */
/* 0x000ea8000c1e1900 */
/*00d0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x000ee8000c1e1900 */
/*00e0*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x000ee2000c1e1900 */
/*00f0*/ FMUL R6, R6, R9 ; /* 0x0000000906067220 */
/* 0x004fc80000400000 */
/*0100*/ FFMA R11, R0, R7, R6 ; /* 0x00000007000b7223 */
/* 0x008fca0000000006 */
/*0110*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x000fe8000c101904 */
/*0120*/ LDG.E R6, [R4.64+0x4] ; /* 0x0000040404067981 */
/* 0x000ea4000c1e1900 */
/*0130*/ FMUL R6, R7, R6 ; /* 0x0000000607067220 */
/* 0x004fc80000400000 */
/*0140*/ FFMA R9, R0, R9, -R6 ; /* 0x0000000900097223 */
/* 0x000fca0000000806 */
/*0150*/ STG.E [R2.64+0x4], R9 ; /* 0x0000040902007986 */
/* 0x000fe2000c101904 */
/*0160*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0170*/ BRA 0x170; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z20kernel_multiply_realPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fc800078e0205 */
/*0090*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe400078e0205 */
/*00a0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ FMUL R7, R0, R3 ; /* 0x0000000300077220 */
/* 0x004fca0000400000 */
/*00d0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z23kernel_multiply_complexPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0070*/ SHF.L.U32 R0, R0, 0x1, RZ ; /* 0x0000000100007819 */
/* 0x000fe200000006ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*0090*/ IMAD.WIDE R4, R0, R3, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0203 */
/*00a0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fe200078e0203 */
/*00b0*/ LDG.E R6, [R4.64+0x4] ; /* 0x0000040404067981 */
/* 0x000ea8000c1e1900 */
/*00c0*/ LDG.E R9, [R2.64+0x4] ; /* 0x0000040402097981 */
/* 0x000ea8000c1e1900 */
/*00d0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x000ee8000c1e1900 */
/*00e0*/ LDG.E R7, [R2.64] ; /* 0x0000000402077981 */
/* 0x000ee2000c1e1900 */
/*00f0*/ FMUL R6, R6, R9 ; /* 0x0000000906067220 */
/* 0x004fc80000400000 */
/*0100*/ FFMA R11, R0, R7, -R6 ; /* 0x00000007000b7223 */
/* 0x008fca0000000806 */
/*0110*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x000fe8000c101904 */
/*0120*/ LDG.E R6, [R4.64+0x4] ; /* 0x0000040404067981 */
/* 0x000ea2000c1e1900 */
/*0130*/ FMUL R0, R0, R9 ; /* 0x0000000900007220 */
/* 0x000fc80000400000 */
/*0140*/ FFMA R7, R7, R6, R0 ; /* 0x0000000607077223 */
/* 0x004fca0000000000 */
/*0150*/ STG.E [R2.64+0x4], R7 ; /* 0x0000040702007986 */
/* 0x000fe2000c101904 */
/*0160*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0170*/ BRA 0x170; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z20kernel_divideStable2PfS_fi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x174], PT ; /* 0x00005d0000007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0080*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fca00078e0203 */
/*0090*/ LDG.E R6, [R2.64] ; /* 0x0000000402067981 */
/* 0x000ea4000c1e1900 */
/*00a0*/ FSETP.GEU.AND P0, PT, R6, c[0x0][0x170], PT ; /* 0x00005c0006007a0b */
/* 0x004fda0003f0e000 */
/*00b0*/ @!P0 BRA 0x1e0 ; /* 0x0000012000008947 */
/* 0x000fea0003800000 */
/*00c0*/ SHF.R.S32.HI R5, RZ, 0x1f, R0 ; /* 0x0000001fff057819 */
/* 0x000fe40000011400 */
/*00d0*/ LEA R4, P0, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000047a11 */
/* 0x000fc800078010ff */
/*00e0*/ LEA.HI.X R5, R0, c[0x0][0x164], R5, 0x2, P0 ; /* 0x0000590000057a11 */
/* 0x000fcc00000f1405 */
/*00f0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0100*/ MUFU.RCP R7, R6 ; /* 0x0000000600077308 */
/* 0x000e220000001000 */
/*0110*/ BSSY B0, 0x1c0 ; /* 0x000000a000007945 */
/* 0x000fe20003800000 */
/*0120*/ FFMA R0, -R6, R7, 1 ; /* 0x3f80000006007423 */
/* 0x001fc80000000107 */
/*0130*/ FFMA R0, R7, R0, R7 ; /* 0x0000000007007223 */
/* 0x000fe40000000007 */
/*0140*/ FCHK P0, R5, R6 ; /* 0x0000000605007302 */
/* 0x004e240000000000 */
/*0150*/ FFMA R7, R5, R0, RZ ; /* 0x0000000005077223 */
/* 0x000fc800000000ff */
/*0160*/ FFMA R8, -R6, R7, R5 ; /* 0x0000000706087223 */
/* 0x000fc80000000105 */
/*0170*/ FFMA R7, R0, R8, R7 ; /* 0x0000000800077223 */
/* 0x000fe20000000007 */
/*0180*/ @!P0 BRA 0x1b0 ; /* 0x0000002000008947 */
/* 0x001fea0003800000 */
/*0190*/ MOV R0, 0x1b0 ; /* 0x000001b000007802 */
/* 0x000fe40000000f00 */
/*01a0*/ CALL.REL.NOINC 0x200 ; /* 0x0000005000007944 */
/* 0x000fea0003c00000 */
/*01b0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*01c0*/ STG.E [R2.64], R7 ; /* 0x0000000702007986 */
/* 0x000fe2000c101904 */
/*01d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01e0*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */
/* 0x000fe2000c101904 */
/*01f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0200*/ SHF.R.U32.HI R7, RZ, 0x17, R6.reuse ; /* 0x00000017ff077819 */
/* 0x100fe20000011606 */
/*0210*/ BSSY B1, 0x860 ; /* 0x0000064000017945 */
/* 0x000fe20003800000 */
/*0220*/ SHF.R.U32.HI R4, RZ, 0x17, R5.reuse ; /* 0x00000017ff047819 */
/* 0x100fe20000011605 */
/*0230*/ IMAD.MOV.U32 R8, RZ, RZ, R5 ; /* 0x000000ffff087224 */
/* 0x000fe200078e0005 */
/*0240*/ LOP3.LUT R7, R7, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff07077812 */
/* 0x000fe200078ec0ff */
/*0250*/ IMAD.MOV.U32 R9, RZ, RZ, R6 ; /* 0x000000ffff097224 */
/* 0x000fe200078e0006 */
/*0260*/ LOP3.LUT R4, R4, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff04047812 */
/* 0x000fe400078ec0ff */
/*0270*/ IADD3 R12, R7, -0x1, RZ ; /* 0xffffffff070c7810 */
/* 0x000fc40007ffe0ff */
/*0280*/ IADD3 R11, R4, -0x1, RZ ; /* 0xffffffff040b7810 */
/* 0x000fe40007ffe0ff */
/*0290*/ ISETP.GT.U32.AND P0, PT, R12, 0xfd, PT ; /* 0x000000fd0c00780c */
/* 0x000fc80003f04070 */
/*02a0*/ ISETP.GT.U32.OR P0, PT, R11, 0xfd, P0 ; /* 0x000000fd0b00780c */
/* 0x000fda0000704470 */
/*02b0*/ @!P0 IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a8224 */
/* 0x000fe200078e00ff */
/*02c0*/ @!P0 BRA 0x440 ; /* 0x0000017000008947 */
/* 0x000fea0003800000 */
/*02d0*/ FSETP.GTU.FTZ.AND P0, PT, |R5|, +INF , PT ; /* 0x7f8000000500780b */
/* 0x000fe40003f1c200 */
/*02e0*/ FSETP.GTU.FTZ.AND P1, PT, |R6|, +INF , PT ; /* 0x7f8000000600780b */
/* 0x000fc80003f3c200 */
/*02f0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000703570 */
/*0300*/ @P0 BRA 0x840 ; /* 0x0000053000000947 */
/* 0x000fea0003800000 */
/*0310*/ LOP3.LUT P0, RZ, R9, 0x7fffffff, R8, 0xc8, !PT ; /* 0x7fffffff09ff7812 */
/* 0x000fda000780c808 */
/*0320*/ @!P0 BRA 0x820 ; /* 0x000004f000008947 */
/* 0x000fea0003800000 */
/*0330*/ FSETP.NEU.FTZ.AND P2, PT, |R5|.reuse, +INF , PT ; /* 0x7f8000000500780b */
/* 0x040fe40003f5d200 */
/*0340*/ FSETP.NEU.FTZ.AND P1, PT, |R6|, +INF , PT ; /* 0x7f8000000600780b */
/* 0x000fe40003f3d200 */
/*0350*/ FSETP.NEU.FTZ.AND P0, PT, |R5|, +INF , PT ; /* 0x7f8000000500780b */
/* 0x000fd60003f1d200 */
/*0360*/ @!P1 BRA !P2, 0x820 ; /* 0x000004b000009947 */
/* 0x000fea0005000000 */
/*0370*/ LOP3.LUT P2, RZ, R8, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff08ff7812 */
/* 0x000fc8000784c0ff */
/*0380*/ PLOP3.LUT P1, PT, P1, P2, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000f24572 */
/*0390*/ @P1 BRA 0x800 ; /* 0x0000046000001947 */
/* 0x000fea0003800000 */
/*03a0*/ LOP3.LUT P1, RZ, R9, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff09ff7812 */
/* 0x000fc8000782c0ff */
/*03b0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0x2a, 0x0 ; /* 0x000000000000781c */
/* 0x000fda0000702572 */
/*03c0*/ @P0 BRA 0x7d0 ; /* 0x0000040000000947 */
/* 0x000fea0003800000 */
/*03d0*/ ISETP.GE.AND P0, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */
/* 0x000fe40003f06270 */
/*03e0*/ ISETP.GE.AND P1, PT, R12, RZ, PT ; /* 0x000000ff0c00720c */
/* 0x000fd60003f26270 */
/*03f0*/ @P0 IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a0224 */
/* 0x000fe400078e00ff */
/*0400*/ @!P0 IMAD.MOV.U32 R10, RZ, RZ, -0x40 ; /* 0xffffffc0ff0a8424 */
/* 0x000fe400078e00ff */
/*0410*/ @!P0 FFMA R8, R5, 1.84467440737095516160e+19, RZ ; /* 0x5f80000005088823 */
/* 0x000fe400000000ff */
/*0420*/ @!P1 FFMA R9, R6, 1.84467440737095516160e+19, RZ ; /* 0x5f80000006099823 */
/* 0x000fe200000000ff */
/*0430*/ @!P1 IADD3 R10, R10, 0x40, RZ ; /* 0x000000400a0a9810 */
/* 0x000fe40007ffe0ff */
/*0440*/ LEA R6, R7, 0xc0800000, 0x17 ; /* 0xc080000007067811 */
/* 0x000fe200078eb8ff */
/*0450*/ BSSY B2, 0x7c0 ; /* 0x0000036000027945 */
/* 0x000fe20003800000 */
/*0460*/ IADD3 R5, R4, -0x7f, RZ ; /* 0xffffff8104057810 */
/* 0x000fc60007ffe0ff */
/*0470*/ IMAD.IADD R6, R9, 0x1, -R6 ; /* 0x0000000109067824 */
/* 0x000fe200078e0a06 */
/*0480*/ IADD3 R7, R5.reuse, 0x7f, -R7 ; /* 0x0000007f05077810 */
/* 0x040fe20007ffe807 */
/*0490*/ IMAD R8, R5, -0x800000, R8 ; /* 0xff80000005087824 */
/* 0x000fe400078e0208 */
/*04a0*/ MUFU.RCP R9, R6 ; /* 0x0000000600097308 */
/* 0x000e220000001000 */
/*04b0*/ FADD.FTZ R11, -R6, -RZ ; /* 0x800000ff060b7221 */
/* 0x000fe40000010100 */
/*04c0*/ IMAD.IADD R7, R7, 0x1, R10 ; /* 0x0000000107077824 */
/* 0x000fe400078e020a */
/*04d0*/ FFMA R4, R9, R11, 1 ; /* 0x3f80000009047423 */
/* 0x001fc8000000000b */
/*04e0*/ FFMA R13, R9, R4, R9 ; /* 0x00000004090d7223 */
/* 0x000fc80000000009 */
/*04f0*/ FFMA R4, R8, R13, RZ ; /* 0x0000000d08047223 */
/* 0x000fc800000000ff */
/*0500*/ FFMA R9, R11, R4, R8 ; /* 0x000000040b097223 */
/* 0x000fc80000000008 */
/*0510*/ FFMA R12, R13, R9, R4 ; /* 0x000000090d0c7223 */
/* 0x000fc80000000004 */
/*0520*/ FFMA R8, R11, R12, R8 ; /* 0x0000000c0b087223 */
/* 0x000fc80000000008 */
/*0530*/ FFMA R4, R13, R8, R12 ; /* 0x000000080d047223 */
/* 0x000fca000000000c */
/*0540*/ SHF.R.U32.HI R5, RZ, 0x17, R4 ; /* 0x00000017ff057819 */
/* 0x000fc80000011604 */
/*0550*/ LOP3.LUT R5, R5, 0xff, RZ, 0xc0, !PT ; /* 0x000000ff05057812 */
/* 0x000fca00078ec0ff */
/*0560*/ IMAD.IADD R9, R5, 0x1, R7 ; /* 0x0000000105097824 */
/* 0x000fca00078e0207 */
/*0570*/ IADD3 R5, R9, -0x1, RZ ; /* 0xffffffff09057810 */
/* 0x000fc80007ffe0ff */
/*0580*/ ISETP.GE.U32.AND P0, PT, R5, 0xfe, PT ; /* 0x000000fe0500780c */
/* 0x000fda0003f06070 */
/*0590*/ @!P0 BRA 0x7a0 ; /* 0x0000020000008947 */
/* 0x000fea0003800000 */
/*05a0*/ ISETP.GT.AND P0, PT, R9, 0xfe, PT ; /* 0x000000fe0900780c */
/* 0x000fda0003f04270 */
/*05b0*/ @P0 BRA 0x770 ; /* 0x000001b000000947 */
/* 0x000fea0003800000 */
/*05c0*/ ISETP.GE.AND P0, PT, R9, 0x1, PT ; /* 0x000000010900780c */
/* 0x000fda0003f06270 */
/*05d0*/ @P0 BRA 0x7b0 ; /* 0x000001d000000947 */
/* 0x000fea0003800000 */
/*05e0*/ ISETP.GE.AND P0, PT, R9, -0x18, PT ; /* 0xffffffe80900780c */
/* 0x000fe40003f06270 */
/*05f0*/ LOP3.LUT R4, R4, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000004047812 */
/* 0x000fd600078ec0ff */
/*0600*/ @!P0 BRA 0x7b0 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*0610*/ FFMA.RZ R5, R13, R8.reuse, R12.reuse ; /* 0x000000080d057223 */
/* 0x180fe2000000c00c */
/*0620*/ ISETP.NE.AND P2, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fe20003f45270 */
/*0630*/ FFMA.RM R6, R13, R8.reuse, R12.reuse ; /* 0x000000080d067223 */
/* 0x180fe2000000400c */
/*0640*/ ISETP.NE.AND P1, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fe40003f25270 */
/*0650*/ LOP3.LUT R7, R5, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff05077812 */
/* 0x000fe200078ec0ff */
/*0660*/ FFMA.RP R5, R13, R8, R12 ; /* 0x000000080d057223 */
/* 0x000fe2000000800c */
/*0670*/ IADD3 R8, R9, 0x20, RZ ; /* 0x0000002009087810 */
/* 0x000fe20007ffe0ff */
/*0680*/ IMAD.MOV R9, RZ, RZ, -R9 ; /* 0x000000ffff097224 */
/* 0x000fe200078e0a09 */
/*0690*/ LOP3.LUT R7, R7, 0x800000, RZ, 0xfc, !PT ; /* 0x0080000007077812 */
/* 0x000fe400078efcff */
/*06a0*/ FSETP.NEU.FTZ.AND P0, PT, R5, R6, PT ; /* 0x000000060500720b */
/* 0x000fc40003f1d000 */
/*06b0*/ SHF.L.U32 R8, R7, R8, RZ ; /* 0x0000000807087219 */
/* 0x000fe400000006ff */
/*06c0*/ SEL R6, R9, RZ, P2 ; /* 0x000000ff09067207 */
/* 0x000fe40001000000 */
/*06d0*/ ISETP.NE.AND P1, PT, R8, RZ, P1 ; /* 0x000000ff0800720c */
/* 0x000fe40000f25270 */
/*06e0*/ SHF.R.U32.HI R6, RZ, R6, R7 ; /* 0x00000006ff067219 */
/* 0x000fe40000011607 */
/*06f0*/ PLOP3.LUT P0, PT, P0, P1, PT, 0xa8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40000703570 */
/*0700*/ SHF.R.U32.HI R8, RZ, 0x1, R6 ; /* 0x00000001ff087819 */
/* 0x000fc40000011606 */
/*0710*/ SEL R5, RZ, 0x1, !P0 ; /* 0x00000001ff057807 */
/* 0x000fc80004000000 */
/*0720*/ LOP3.LUT R5, R5, 0x1, R8, 0xf8, !PT ; /* 0x0000000105057812 */
/* 0x000fc800078ef808 */
/*0730*/ LOP3.LUT R5, R5, R6, RZ, 0xc0, !PT ; /* 0x0000000605057212 */
/* 0x000fca00078ec0ff */
/*0740*/ IMAD.IADD R5, R8, 0x1, R5 ; /* 0x0000000108057824 */
/* 0x000fca00078e0205 */
/*0750*/ LOP3.LUT R4, R5, R4, RZ, 0xfc, !PT ; /* 0x0000000405047212 */
/* 0x000fe200078efcff */
/*0760*/ BRA 0x7b0 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*0770*/ LOP3.LUT R4, R4, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000004047812 */
/* 0x000fc800078ec0ff */
/*0780*/ LOP3.LUT R4, R4, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000004047812 */
/* 0x000fe200078efcff */
/*0790*/ BRA 0x7b0 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*07a0*/ IMAD R4, R7, 0x800000, R4 ; /* 0x0080000007047824 */
/* 0x000fe400078e0204 */
/*07b0*/ BSYNC B2 ; /* 0x0000000000027941 */
/* 0x000fea0003800000 */
/*07c0*/ BRA 0x850 ; /* 0x0000008000007947 */
/* 0x000fea0003800000 */
/*07d0*/ LOP3.LUT R4, R9, 0x80000000, R8, 0x48, !PT ; /* 0x8000000009047812 */
/* 0x000fc800078e4808 */
/*07e0*/ LOP3.LUT R4, R4, 0x7f800000, RZ, 0xfc, !PT ; /* 0x7f80000004047812 */
/* 0x000fe200078efcff */
/*07f0*/ BRA 0x850 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*0800*/ LOP3.LUT R4, R9, 0x80000000, R8, 0x48, !PT ; /* 0x8000000009047812 */
/* 0x000fe200078e4808 */
/*0810*/ BRA 0x850 ; /* 0x0000003000007947 */
/* 0x000fea0003800000 */
/*0820*/ MUFU.RSQ R4, -QNAN ; /* 0xffc0000000047908 */
/* 0x000e220000001400 */
/*0830*/ BRA 0x850 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0840*/ FADD.FTZ R4, R5, R6 ; /* 0x0000000605047221 */
/* 0x000fe40000010000 */
/*0850*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0860*/ IMAD.MOV.U32 R7, RZ, RZ, R4 ; /* 0x000000ffff077224 */
/* 0x001fe400078e0004 */
/*0870*/ IMAD.MOV.U32 R4, RZ, RZ, R0 ; /* 0x000000ffff047224 */
/* 0x000fe400078e0000 */
/*0880*/ IMAD.MOV.U32 R5, RZ, RZ, 0x0 ; /* 0x00000000ff057424 */
/* 0x000fc800078e00ff */
/*0890*/ RET.REL.NODEC R4 0x0 ; /* 0xfffff76004007950 */
/* 0x000fea0003c3ffff */
/*08a0*/ BRA 0x8a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*08b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*08f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0900*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0910*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0920*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0930*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0940*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0950*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0960*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0970*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <cufft.h>
const int THREADS=64;
__global__ void kernel_divideStable2(float *data1, float *data2, float threadshold, int len);
__global__ void kernel_multiply_complex(float *data1, float *data2, int len);
__global__ void kernel_multiply_real(float *data1, float *data2, int len);
__global__ void kernel_multiplyConj(float *data1, float *data2, int len);
__global__ void kernel_copy(float *data1, float *data2, int len);
using namespace std;
void inv_forwardDFT(float *data, int nx, int ny, int nz){
cufftReal *a=(cufftReal*)data;
cufftComplex *A=(cufftComplex*)data;
cufftHandle plan_forward;
if(nz==1){
cufftPlan2d(&plan_forward, nx, ny, CUFFT_R2C);
cufftExecR2C(plan_forward, a, A);
}
else{
// cout<<"nx= "<<nx<<" ny= "<<ny<<" nz= "<<nz<<endl;
cufftPlan3d(&plan_forward, nx, ny, nz, CUFFT_R2C);
cufftExecR2C(plan_forward, a, A);
}
cufftDestroy(plan_forward);
}
// In-place inverse complex-to-real DFT on a device buffer via cuFFT.
// Mirror of inv_forwardDFT: `data` holds the cufftComplex input and receives
// the cufftReal CUFFT_C2R result in the same storage. nz==1 -> 2-D plan.
// NOTE(review): cuFFT C2R is unnormalized (result scaled by nx*ny*nz);
// presumably a caller compensates -- verify.
// NOTE(review): all cuFFT status codes are ignored.
void inv_inverseDFT(float *data, int nx, int ny, int nz){
cufftReal *a=(cufftReal*)data; // buffer viewed as real output
cufftComplex *A=(cufftComplex*)data; // same buffer viewed as complex input (in-place)
cufftHandle plan_backward;
if(nz==1){
cufftPlan2d(&plan_backward, nx, ny, CUFFT_C2R);
cufftExecC2R(plan_backward, A, a);
}
else{
cufftPlan3d(&plan_backward, nx, ny, nz, CUFFT_C2R);
cufftExecC2R(plan_backward, A, a);
}
cufftDestroy(plan_backward); // plan is created and destroyed on every call
}
// Host launcher: runs kernel_divideStable2 over `len` elements with
// THREADS (=64) threads per block. NOTE(review): launch errors are not checked.
void inv_divideStable2(float *data1, float *data2, float threadshold, int len){
unsigned long int block=(len+THREADS-1)/THREADS; // ceil(len / THREADS)
kernel_divideStable2<<<block, THREADS>>>(data1, data2, threadshold, len);
}
// Element-wise "stabilized" division, written back into data2:
//   data2[i] = (data2[i] < threadshold) ? 0 : data1[i] / data2[i]
// i.e. divisors below the threshold are clamped to a zero result instead of
// producing huge/undefined quotients. (Parameter name "threadshold" is a
// historical typo kept for interface stability.)
__global__ void kernel_divideStable2(float *data1, float *data2, float threadshold, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x; // one element per thread
if(offset>=len) return; // guard the ragged last block
if(data2[offset]<threadshold)
data2[offset]=(float)0; // divisor too small: clamp result to 0
else
data2[offset]=data1[offset]/data2[offset];
}
// Host launcher: `len` is the length in floats; the kernel processes
// len/2 interleaved (re,im) complex pairs, one per thread.
void inv_multiply_complex(float * data1, float *data2, int len){
unsigned long int block=(len/2+THREADS-1)/THREADS; // ceil((len/2) / THREADS)
kernel_multiply_complex<<<block, THREADS>>>(data1, data2, len/2);
}
// In-place complex multiply of interleaved (re,im) float arrays:
// data1[i] *= data2[i] for i in [0,len) complex elements.
// For data1=a+bi, data2=c+di: re = ac - bd, im = ad + bc.
// The real slot is overwritten first, but the imag expression reads the
// still-unmodified data1[2*offset+1], so the ordering is correct.
__global__ void kernel_multiply_complex(float *data1, float *data2, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x; // one complex element per thread
if(offset>=len) return; // guard the ragged last block
float temp1=data1[(2*offset)]; // save original real parts before overwrite
float temp2=data2[(2*offset)];
data1[(2*offset)]=(temp1*temp2-data1[(2*offset+1)]*data2[(2*offset+1)]); // re = ac - bd
data1[(2*offset+1)]=(temp1*data2[(2*offset+1)]+data1[(2*offset+1)]*temp2); // im = ad + bc
}
// Host launcher: element-wise real multiply over `len` floats,
// THREADS (=64) threads per block.
void inv_multiply_real(float *data1, float *data2, int len){
unsigned long int block=(len+THREADS-1)/THREADS; // ceil(len / THREADS)
kernel_multiply_real<<<block, THREADS>>>(data1, data2, len);
}
// Element-wise real multiply, in place: data1[i] *= data2[i].
__global__ void kernel_multiply_real(float *data1, float *data2, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x; // one element per thread
if(offset>=len) return; // guard the ragged last block
data1[offset] *= data2[offset];
}
// Host launcher: `len` is the length in floats; the kernel processes
// len/2 interleaved (re,im) complex pairs, multiplying by the conjugate.
void inv_multiplyConj(float *data1, float *data2, int len){
unsigned long int block=(len/2+THREADS-1)/THREADS; // ceil((len/2) / THREADS)
kernel_multiplyConj<<<block, THREADS>>>(data1, data2, len/2);
}
// In-place multiply by the complex conjugate: data1[i] *= conj(data2[i])
// for interleaved (re,im) arrays. For data1=a+bi, data2=c+di:
// re = ac + bd, im = bc - ad (correlation rather than convolution).
// The real slot is overwritten first, but the imag expression reads the
// still-unmodified data1[2*offset+1], so the ordering is correct.
__global__ void kernel_multiplyConj(float *data1, float *data2, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x; // one complex element per thread
if(offset>=len) return; // guard the ragged last block
float temp1 = data1[(2 * offset)]; // save original real parts before overwrite
float temp2 = data2[(2 * offset)];
data1[(2*offset)] = (temp1*temp2+data1[(2*offset+1)]*data2[(2*offset+1)]); // re = ac + bd
data1[(2*offset+1)] = (-temp1*data2[(2*offset+1)]+data1[(2*offset+1)]*temp2); // im = bc - ad
}
// Host launcher: device-to-device element copy of `len` floats,
// THREADS (=64) threads per block.
void inv_copy(float *data1, float *data2, int len){
unsigned long int block=(len+THREADS-1)/THREADS; // ceil(len / THREADS)
kernel_copy<<<block, THREADS>>>(data1, data2, len);
}
// Element-wise copy: data1[i] = data2[i]. (A cudaMemcpyDeviceToDevice
// would also work; a kernel keeps the launch pattern uniform.)
__global__ void kernel_copy(float *data1, float *data2, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x; // one element per thread
if(offset>=len) return; // guard the ragged last block
data1[offset]=data2[offset];
} | .file "tmpxft_00049d04_00000000-6_Fourier.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3701:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3701:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z14inv_forwardDFTPfiii
.type _Z14inv_forwardDFTPfiii, @function
_Z14inv_forwardDFTPfiii:
.LFB3692:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $16, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
cmpl $1, %ecx
je .L8
leaq 4(%rsp), %rdi
movl $42, %r8d
call cufftPlan3d@PLT
movq %rbx, %rdx
movq %rbx, %rsi
movl 4(%rsp), %edi
call cufftExecR2C@PLT
.L5:
movl 4(%rsp), %edi
call cufftDestroy@PLT
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L9
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore_state
leaq 4(%rsp), %rdi
movl $42, %ecx
call cufftPlan2d@PLT
movq %rbx, %rdx
movq %rbx, %rsi
movl 4(%rsp), %edi
call cufftExecR2C@PLT
jmp .L5
.L9:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3692:
.size _Z14inv_forwardDFTPfiii, .-_Z14inv_forwardDFTPfiii
.globl _Z14inv_inverseDFTPfiii
.type _Z14inv_inverseDFTPfiii, @function
_Z14inv_inverseDFTPfiii:
.LFB3693:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $16, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
cmpl $1, %ecx
je .L15
leaq 4(%rsp), %rdi
movl $44, %r8d
call cufftPlan3d@PLT
movq %rbx, %rdx
movq %rbx, %rsi
movl 4(%rsp), %edi
call cufftExecC2R@PLT
.L12:
movl 4(%rsp), %edi
call cufftDestroy@PLT
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
leaq 4(%rsp), %rdi
movl $44, %ecx
call cufftPlan2d@PLT
movq %rbx, %rdx
movq %rbx, %rsi
movl 4(%rsp), %edi
call cufftExecC2R@PLT
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3693:
.size _Z14inv_inverseDFTPfiii, .-_Z14inv_inverseDFTPfiii
.globl _Z44__device_stub__Z20kernel_divideStable2PfS_fiPfS_fi
.type _Z44__device_stub__Z20kernel_divideStable2PfS_fiPfS_fi, @function
_Z44__device_stub__Z20kernel_divideStable2PfS_fiPfS_fi:
.LFB3723:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movss %xmm0, 12(%rsp)
movl %edx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L21
.L17:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L22
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z20kernel_divideStable2PfS_fi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L17
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3723:
.size _Z44__device_stub__Z20kernel_divideStable2PfS_fiPfS_fi, .-_Z44__device_stub__Z20kernel_divideStable2PfS_fiPfS_fi
.globl _Z20kernel_divideStable2PfS_fi
.type _Z20kernel_divideStable2PfS_fi, @function
_Z20kernel_divideStable2PfS_fi:
.LFB3724:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z44__device_stub__Z20kernel_divideStable2PfS_fiPfS_fi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3724:
.size _Z20kernel_divideStable2PfS_fi, .-_Z20kernel_divideStable2PfS_fi
.globl _Z17inv_divideStable2PfS_fi
.type _Z17inv_divideStable2PfS_fi, @function
_Z17inv_divideStable2PfS_fi:
.LFB3694:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $48, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %rbp
movq %rsi, %r12
movss %xmm0, 12(%rsp)
movl %edx, %ebx
movl $64, 36(%rsp)
movl $1, 40(%rsp)
leal 126(%rdx), %eax
addl $63, %edx
cmovns %edx, %eax
sarl $6, %eax
movl %eax, 24(%rsp)
movl $1, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L28
.L25:
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
movl %ebx, %edx
movss 12(%rsp), %xmm0
movq %r12, %rsi
movq %rbp, %rdi
call _Z44__device_stub__Z20kernel_divideStable2PfS_fiPfS_fi
jmp .L25
.cfi_endproc
.LFE3694:
.size _Z17inv_divideStable2PfS_fi, .-_Z17inv_divideStable2PfS_fi
.globl _Z46__device_stub__Z23kernel_multiply_complexPfS_iPfS_i
.type _Z46__device_stub__Z23kernel_multiply_complexPfS_iPfS_i, @function
_Z46__device_stub__Z23kernel_multiply_complexPfS_iPfS_i:
.LFB3725:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z23kernel_multiply_complexPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3725:
.size _Z46__device_stub__Z23kernel_multiply_complexPfS_iPfS_i, .-_Z46__device_stub__Z23kernel_multiply_complexPfS_iPfS_i
.globl _Z23kernel_multiply_complexPfS_i
.type _Z23kernel_multiply_complexPfS_i, @function
_Z23kernel_multiply_complexPfS_i:
.LFB3726:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z23kernel_multiply_complexPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3726:
.size _Z23kernel_multiply_complexPfS_i, .-_Z23kernel_multiply_complexPfS_i
.globl _Z20inv_multiply_complexPfS_i
.type _Z20inv_multiply_complexPfS_i, @function
_Z20inv_multiply_complexPfS_i:
.LFB3695:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbp
movq %rsi, %r12
movl %edx, %eax
shrl $31, %eax
leal (%rax,%rdx), %ebx
sarl %ebx
movl $64, 20(%rsp)
movl $1, 24(%rsp)
leal 126(%rbx), %eax
movl %ebx, %edx
addl $63, %edx
cmovns %edx, %eax
sarl $6, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L40
.L37:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L40:
.cfi_restore_state
movl %ebx, %edx
movq %r12, %rsi
movq %rbp, %rdi
call _Z46__device_stub__Z23kernel_multiply_complexPfS_iPfS_i
jmp .L37
.cfi_endproc
.LFE3695:
.size _Z20inv_multiply_complexPfS_i, .-_Z20inv_multiply_complexPfS_i
.globl _Z43__device_stub__Z20kernel_multiply_realPfS_iPfS_i
.type _Z43__device_stub__Z20kernel_multiply_realPfS_iPfS_i, @function
_Z43__device_stub__Z20kernel_multiply_realPfS_iPfS_i:
.LFB3727:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L45
.L41:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L46
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L45:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z20kernel_multiply_realPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L41
.L46:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3727:
.size _Z43__device_stub__Z20kernel_multiply_realPfS_iPfS_i, .-_Z43__device_stub__Z20kernel_multiply_realPfS_iPfS_i
.globl _Z20kernel_multiply_realPfS_i
.type _Z20kernel_multiply_realPfS_i, @function
_Z20kernel_multiply_realPfS_i:
.LFB3728:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z43__device_stub__Z20kernel_multiply_realPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3728:
.size _Z20kernel_multiply_realPfS_i, .-_Z20kernel_multiply_realPfS_i
.globl _Z17inv_multiply_realPfS_i
.type _Z17inv_multiply_realPfS_i, @function
_Z17inv_multiply_realPfS_i:
.LFB3696:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbp
movq %rsi, %r12
movl %edx, %ebx
movl $64, 20(%rsp)
movl $1, 24(%rsp)
leal 126(%rdx), %eax
addl $63, %edx
cmovns %edx, %eax
sarl $6, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L52
.L49:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L52:
.cfi_restore_state
movl %ebx, %edx
movq %r12, %rsi
movq %rbp, %rdi
call _Z43__device_stub__Z20kernel_multiply_realPfS_iPfS_i
jmp .L49
.cfi_endproc
.LFE3696:
.size _Z17inv_multiply_realPfS_i, .-_Z17inv_multiply_realPfS_i
.globl _Z42__device_stub__Z19kernel_multiplyConjPfS_iPfS_i
.type _Z42__device_stub__Z19kernel_multiplyConjPfS_iPfS_i, @function
_Z42__device_stub__Z19kernel_multiplyConjPfS_iPfS_i:
.LFB3729:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L57
.L53:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L58
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L57:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19kernel_multiplyConjPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L53
.L58:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3729:
.size _Z42__device_stub__Z19kernel_multiplyConjPfS_iPfS_i, .-_Z42__device_stub__Z19kernel_multiplyConjPfS_iPfS_i
.globl _Z19kernel_multiplyConjPfS_i
.type _Z19kernel_multiplyConjPfS_i, @function
_Z19kernel_multiplyConjPfS_i:
.LFB3730:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z19kernel_multiplyConjPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3730:
.size _Z19kernel_multiplyConjPfS_i, .-_Z19kernel_multiplyConjPfS_i
.globl _Z16inv_multiplyConjPfS_i
.type _Z16inv_multiplyConjPfS_i, @function
_Z16inv_multiplyConjPfS_i:
.LFB3697:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbp
movq %rsi, %r12
movl %edx, %eax
shrl $31, %eax
leal (%rax,%rdx), %ebx
sarl %ebx
movl $64, 20(%rsp)
movl $1, 24(%rsp)
leal 126(%rbx), %eax
movl %ebx, %edx
addl $63, %edx
cmovns %edx, %eax
sarl $6, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L64
.L61:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L64:
.cfi_restore_state
movl %ebx, %edx
movq %r12, %rsi
movq %rbp, %rdi
call _Z42__device_stub__Z19kernel_multiplyConjPfS_iPfS_i
jmp .L61
.cfi_endproc
.LFE3697:
.size _Z16inv_multiplyConjPfS_i, .-_Z16inv_multiplyConjPfS_i
.globl _Z34__device_stub__Z11kernel_copyPfS_iPfS_i
.type _Z34__device_stub__Z11kernel_copyPfS_iPfS_i, @function
_Z34__device_stub__Z11kernel_copyPfS_iPfS_i:
.LFB3731:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L69
.L65:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L70
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L69:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11kernel_copyPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L65
.L70:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3731:
.size _Z34__device_stub__Z11kernel_copyPfS_iPfS_i, .-_Z34__device_stub__Z11kernel_copyPfS_iPfS_i
.globl _Z11kernel_copyPfS_i
.type _Z11kernel_copyPfS_i, @function
_Z11kernel_copyPfS_i:
.LFB3732:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z34__device_stub__Z11kernel_copyPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3732:
.size _Z11kernel_copyPfS_i, .-_Z11kernel_copyPfS_i
.globl _Z8inv_copyPfS_i
.type _Z8inv_copyPfS_i, @function
_Z8inv_copyPfS_i:
.LFB3698:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbp
movq %rsi, %r12
movl %edx, %ebx
movl $64, 20(%rsp)
movl $1, 24(%rsp)
leal 126(%rdx), %eax
addl $63, %edx
cmovns %edx, %eax
sarl $6, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L76
.L73:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L76:
.cfi_restore_state
movl %ebx, %edx
movq %r12, %rsi
movq %rbp, %rdi
call _Z34__device_stub__Z11kernel_copyPfS_iPfS_i
jmp .L73
.cfi_endproc
.LFE3698:
.size _Z8inv_copyPfS_i, .-_Z8inv_copyPfS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z11kernel_copyPfS_i"
.LC1:
.string "_Z19kernel_multiplyConjPfS_i"
.LC2:
.string "_Z20kernel_multiply_realPfS_i"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "_Z23kernel_multiply_complexPfS_i"
.align 8
.LC4:
.string "_Z20kernel_divideStable2PfS_fi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3734:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z11kernel_copyPfS_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z19kernel_multiplyConjPfS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z20kernel_multiply_realPfS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z23kernel_multiply_complexPfS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z20kernel_divideStable2PfS_fi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3734:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <cufft.h>
const int THREADS=64;
__global__ void kernel_divideStable2(float *data1, float *data2, float threadshold, int len);
__global__ void kernel_multiply_complex(float *data1, float *data2, int len);
__global__ void kernel_multiply_real(float *data1, float *data2, int len);
__global__ void kernel_multiplyConj(float *data1, float *data2, int len);
__global__ void kernel_copy(float *data1, float *data2, int len);
using namespace std;
void inv_forwardDFT(float *data, int nx, int ny, int nz){
cufftReal *a=(cufftReal*)data;
cufftComplex *A=(cufftComplex*)data;
cufftHandle plan_forward;
if(nz==1){
cufftPlan2d(&plan_forward, nx, ny, CUFFT_R2C);
cufftExecR2C(plan_forward, a, A);
}
else{
// cout<<"nx= "<<nx<<" ny= "<<ny<<" nz= "<<nz<<endl;
cufftPlan3d(&plan_forward, nx, ny, nz, CUFFT_R2C);
cufftExecR2C(plan_forward, a, A);
}
cufftDestroy(plan_forward);
}
void inv_inverseDFT(float *data, int nx, int ny, int nz){
cufftReal *a=(cufftReal*)data;
cufftComplex *A=(cufftComplex*)data;
cufftHandle plan_backward;
if(nz==1){
cufftPlan2d(&plan_backward, nx, ny, CUFFT_C2R);
cufftExecC2R(plan_backward, A, a);
}
else{
cufftPlan3d(&plan_backward, nx, ny, nz, CUFFT_C2R);
cufftExecC2R(plan_backward, A, a);
}
cufftDestroy(plan_backward);
}
void inv_divideStable2(float *data1, float *data2, float threadshold, int len){
unsigned long int block=(len+THREADS-1)/THREADS;
kernel_divideStable2<<<block, THREADS>>>(data1, data2, threadshold, len);
}
__global__ void kernel_divideStable2(float *data1, float *data2, float threadshold, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x;
if(offset>=len) return;
if(data2[offset]<threadshold)
data2[offset]=(float)0;
else
data2[offset]=data1[offset]/data2[offset];
}
void inv_multiply_complex(float * data1, float *data2, int len){
unsigned long int block=(len/2+THREADS-1)/THREADS;
kernel_multiply_complex<<<block, THREADS>>>(data1, data2, len/2);
}
__global__ void kernel_multiply_complex(float *data1, float *data2, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x;
if(offset>=len) return;
float temp1=data1[(2*offset)];
float temp2=data2[(2*offset)];
data1[(2*offset)]=(temp1*temp2-data1[(2*offset+1)]*data2[(2*offset+1)]);
data1[(2*offset+1)]=(temp1*data2[(2*offset+1)]+data1[(2*offset+1)]*temp2);
}
void inv_multiply_real(float *data1, float *data2, int len){
unsigned long int block=(len+THREADS-1)/THREADS;
kernel_multiply_real<<<block, THREADS>>>(data1, data2, len);
}
__global__ void kernel_multiply_real(float *data1, float *data2, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x;
if(offset>=len) return;
data1[offset] *= data2[offset];
}
void inv_multiplyConj(float *data1, float *data2, int len){
unsigned long int block=(len/2+THREADS-1)/THREADS;
kernel_multiplyConj<<<block, THREADS>>>(data1, data2, len/2);
}
__global__ void kernel_multiplyConj(float *data1, float *data2, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x;
if(offset>=len) return;
float temp1 = data1[(2 * offset)];
float temp2 = data2[(2 * offset)];
data1[(2*offset)] = (temp1*temp2+data1[(2*offset+1)]*data2[(2*offset+1)]);
data1[(2*offset+1)] = (-temp1*data2[(2*offset+1)]+data1[(2*offset+1)]*temp2);
}
void inv_copy(float *data1, float *data2, int len){
unsigned long int block=(len+THREADS-1)/THREADS;
kernel_copy<<<block, THREADS>>>(data1, data2, len);
}
__global__ void kernel_copy(float *data1, float *data2, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x;
if(offset>=len) return;
data1[offset]=data2[offset];
} | #include <iostream>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <hipfft/hipfft.h>
const int THREADS=64;
__global__ void kernel_divideStable2(float *data1, float *data2, float threadshold, int len);
__global__ void kernel_multiply_complex(float *data1, float *data2, int len);
__global__ void kernel_multiply_real(float *data1, float *data2, int len);
__global__ void kernel_multiplyConj(float *data1, float *data2, int len);
__global__ void kernel_copy(float *data1, float *data2, int len);
using namespace std;
void inv_forwardDFT(float *data, int nx, int ny, int nz){
hipfftReal *a=(hipfftReal*)data;
hipfftComplex *A=(hipfftComplex*)data;
hipfftHandle plan_forward;
if(nz==1){
hipfftPlan2d(&plan_forward, nx, ny, HIPFFT_R2C);
hipfftExecR2C(plan_forward, a, A);
}
else{
// cout<<"nx= "<<nx<<" ny= "<<ny<<" nz= "<<nz<<endl;
hipfftPlan3d(&plan_forward, nx, ny, nz, HIPFFT_R2C);
hipfftExecR2C(plan_forward, a, A);
}
hipfftDestroy(plan_forward);
}
void inv_inverseDFT(float *data, int nx, int ny, int nz){
hipfftReal *a=(hipfftReal*)data;
hipfftComplex *A=(hipfftComplex*)data;
hipfftHandle plan_backward;
if(nz==1){
hipfftPlan2d(&plan_backward, nx, ny, HIPFFT_C2R);
hipfftExecC2R(plan_backward, A, a);
}
else{
hipfftPlan3d(&plan_backward, nx, ny, nz, HIPFFT_C2R);
hipfftExecC2R(plan_backward, A, a);
}
hipfftDestroy(plan_backward);
}
void inv_divideStable2(float *data1, float *data2, float threadshold, int len){
unsigned long int block=(len+THREADS-1)/THREADS;
kernel_divideStable2<<<block, THREADS>>>(data1, data2, threadshold, len);
}
__global__ void kernel_divideStable2(float *data1, float *data2, float threadshold, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x;
if(offset>=len) return;
if(data2[offset]<threadshold)
data2[offset]=(float)0;
else
data2[offset]=data1[offset]/data2[offset];
}
void inv_multiply_complex(float * data1, float *data2, int len){
unsigned long int block=(len/2+THREADS-1)/THREADS;
kernel_multiply_complex<<<block, THREADS>>>(data1, data2, len/2);
}
__global__ void kernel_multiply_complex(float *data1, float *data2, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x;
if(offset>=len) return;
float temp1=data1[(2*offset)];
float temp2=data2[(2*offset)];
data1[(2*offset)]=(temp1*temp2-data1[(2*offset+1)]*data2[(2*offset+1)]);
data1[(2*offset+1)]=(temp1*data2[(2*offset+1)]+data1[(2*offset+1)]*temp2);
}
void inv_multiply_real(float *data1, float *data2, int len){
unsigned long int block=(len+THREADS-1)/THREADS;
kernel_multiply_real<<<block, THREADS>>>(data1, data2, len);
}
__global__ void kernel_multiply_real(float *data1, float *data2, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x;
if(offset>=len) return;
data1[offset] *= data2[offset];
}
void inv_multiplyConj(float *data1, float *data2, int len){
unsigned long int block=(len/2+THREADS-1)/THREADS;
kernel_multiplyConj<<<block, THREADS>>>(data1, data2, len/2);
}
__global__ void kernel_multiplyConj(float *data1, float *data2, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x;
if(offset>=len) return;
float temp1 = data1[(2 * offset)];
float temp2 = data2[(2 * offset)];
data1[(2*offset)] = (temp1*temp2+data1[(2*offset+1)]*data2[(2*offset+1)]);
data1[(2*offset+1)] = (-temp1*data2[(2*offset+1)]+data1[(2*offset+1)]*temp2);
}
void inv_copy(float *data1, float *data2, int len){
unsigned long int block=(len+THREADS-1)/THREADS;
kernel_copy<<<block, THREADS>>>(data1, data2, len);
}
__global__ void kernel_copy(float *data1, float *data2, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x;
if(offset>=len) return;
data1[offset]=data2[offset];
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <iostream>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <hipfft/hipfft.h>
const int THREADS=64;
__global__ void kernel_divideStable2(float *data1, float *data2, float threadshold, int len);
__global__ void kernel_multiply_complex(float *data1, float *data2, int len);
__global__ void kernel_multiply_real(float *data1, float *data2, int len);
__global__ void kernel_multiplyConj(float *data1, float *data2, int len);
__global__ void kernel_copy(float *data1, float *data2, int len);
using namespace std;
void inv_forwardDFT(float *data, int nx, int ny, int nz){
hipfftReal *a=(hipfftReal*)data;
hipfftComplex *A=(hipfftComplex*)data;
hipfftHandle plan_forward;
if(nz==1){
hipfftPlan2d(&plan_forward, nx, ny, HIPFFT_R2C);
hipfftExecR2C(plan_forward, a, A);
}
else{
// cout<<"nx= "<<nx<<" ny= "<<ny<<" nz= "<<nz<<endl;
hipfftPlan3d(&plan_forward, nx, ny, nz, HIPFFT_R2C);
hipfftExecR2C(plan_forward, a, A);
}
hipfftDestroy(plan_forward);
}
void inv_inverseDFT(float *data, int nx, int ny, int nz){
hipfftReal *a=(hipfftReal*)data;
hipfftComplex *A=(hipfftComplex*)data;
hipfftHandle plan_backward;
if(nz==1){
hipfftPlan2d(&plan_backward, nx, ny, HIPFFT_C2R);
hipfftExecC2R(plan_backward, A, a);
}
else{
hipfftPlan3d(&plan_backward, nx, ny, nz, HIPFFT_C2R);
hipfftExecC2R(plan_backward, A, a);
}
hipfftDestroy(plan_backward);
}
void inv_divideStable2(float *data1, float *data2, float threadshold, int len){
unsigned long int block=(len+THREADS-1)/THREADS;
kernel_divideStable2<<<block, THREADS>>>(data1, data2, threadshold, len);
}
__global__ void kernel_divideStable2(float *data1, float *data2, float threadshold, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x;
if(offset>=len) return;
if(data2[offset]<threadshold)
data2[offset]=(float)0;
else
data2[offset]=data1[offset]/data2[offset];
}
void inv_multiply_complex(float * data1, float *data2, int len){
unsigned long int block=(len/2+THREADS-1)/THREADS;
kernel_multiply_complex<<<block, THREADS>>>(data1, data2, len/2);
}
__global__ void kernel_multiply_complex(float *data1, float *data2, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x;
if(offset>=len) return;
float temp1=data1[(2*offset)];
float temp2=data2[(2*offset)];
data1[(2*offset)]=(temp1*temp2-data1[(2*offset+1)]*data2[(2*offset+1)]);
data1[(2*offset+1)]=(temp1*data2[(2*offset+1)]+data1[(2*offset+1)]*temp2);
}
void inv_multiply_real(float *data1, float *data2, int len){
unsigned long int block=(len+THREADS-1)/THREADS;
kernel_multiply_real<<<block, THREADS>>>(data1, data2, len);
}
__global__ void kernel_multiply_real(float *data1, float *data2, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x;
if(offset>=len) return;
data1[offset] *= data2[offset];
}
void inv_multiplyConj(float *data1, float *data2, int len){
unsigned long int block=(len/2+THREADS-1)/THREADS;
kernel_multiplyConj<<<block, THREADS>>>(data1, data2, len/2);
}
__global__ void kernel_multiplyConj(float *data1, float *data2, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x;
if(offset>=len) return;
float temp1 = data1[(2 * offset)];
float temp2 = data2[(2 * offset)];
data1[(2*offset)] = (temp1*temp2+data1[(2*offset+1)]*data2[(2*offset+1)]);
data1[(2*offset+1)] = (-temp1*data2[(2*offset+1)]+data1[(2*offset+1)]*temp2);
}
void inv_copy(float *data1, float *data2, int len){
unsigned long int block=(len+THREADS-1)/THREADS;
kernel_copy<<<block, THREADS>>>(data1, data2, len);
}
__global__ void kernel_copy(float *data1, float *data2, int len){
int offset=blockDim.x*blockIdx.x+threadIdx.x;
if(offset>=len) return;
data1[offset]=data2[offset];
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z20kernel_divideStable2PfS_fi
.globl _Z20kernel_divideStable2PfS_fi
.p2align 8
.type _Z20kernel_divideStable2PfS_fi,@function
_Z20kernel_divideStable2PfS_fi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x14
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_4
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x8
s_load_b32 s4, s[0:1], 0x10
v_ashrrev_i32_e32 v2, 31, v1
v_mov_b32_e32 v5, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s2, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v3, vcc_lo
s_mov_b32 s2, exec_lo
global_load_b32 v4, v[0:1], off
s_waitcnt vmcnt(0)
v_cmpx_ngt_f32_e32 s4, v4
s_cbranch_execz .LBB0_3
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
v_div_scale_f32 v3, null, v4, v4, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v5, v3
s_waitcnt_depctr 0xfff
v_fma_f32 v6, -v3, v5, 1.0
v_fmac_f32_e32 v5, v6, v5
v_div_scale_f32 v6, vcc_lo, v2, v4, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v7, v6, v5
v_fma_f32 v8, -v3, v7, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v7, v8, v5
v_fma_f32 v3, -v3, v7, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fmas_f32 v3, v3, v5, v7
v_div_fixup_f32 v5, v3, v4, v2
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s2
global_store_b32 v[0:1], v5, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z20kernel_divideStable2PfS_fi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z20kernel_divideStable2PfS_fi, .Lfunc_end0-_Z20kernel_divideStable2PfS_fi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z23kernel_multiply_complexPfS_i
.globl _Z23kernel_multiply_complexPfS_i
.p2align 8
.type _Z23kernel_multiply_complexPfS_i,@function
_Z23kernel_multiply_complexPfS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB1_2
s_load_b128 s[0:3], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_or_b32_e32 v2, 1, v0
v_ashrrev_i32_e32 v1, 31, v0
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v6, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v8, v[6:7], off
global_load_b32 v9, v[2:3], off
global_load_b32 v10, v[4:5], off
global_load_b32 v0, v[0:1], off
s_waitcnt vmcnt(2)
v_mul_f32_e32 v1, v8, v9
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_fma_f32 v1, v10, v0, -v1
global_store_b32 v[4:5], v1, off
global_load_b32 v1, v[2:3], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v1, v10, v1
v_fmac_f32_e32 v1, v0, v8
global_store_b32 v[6:7], v1, off
.LBB1_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23kernel_multiply_complexPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z23kernel_multiply_complexPfS_i, .Lfunc_end1-_Z23kernel_multiply_complexPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z20kernel_multiply_realPfS_i
.globl _Z20kernel_multiply_realPfS_i
.p2align 8
.type _Z20kernel_multiply_realPfS_i,@function
_Z20kernel_multiply_realPfS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB2_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB2_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z20kernel_multiply_realPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z20kernel_multiply_realPfS_i, .Lfunc_end2-_Z20kernel_multiply_realPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z19kernel_multiplyConjPfS_i
.globl _Z19kernel_multiplyConjPfS_i
.p2align 8
.type _Z19kernel_multiplyConjPfS_i,@function
_Z19kernel_multiplyConjPfS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB3_2
s_load_b128 s[0:3], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_or_b32_e32 v2, 1, v0
v_ashrrev_i32_e32 v1, 31, v0
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v6, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s2, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v8, v[6:7], off
global_load_b32 v9, v[2:3], off
global_load_b32 v10, v[4:5], off
global_load_b32 v0, v[0:1], off
s_waitcnt vmcnt(2)
v_mul_f32_e32 v1, v8, v9
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v1, v10, v0
global_store_b32 v[4:5], v1, off
global_load_b32 v1, v[2:3], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v1, v10, v1
v_fma_f32 v0, v0, v8, -v1
global_store_b32 v[6:7], v0, off
.LBB3_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19kernel_multiplyConjPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z19kernel_multiplyConjPfS_i, .Lfunc_end3-_Z19kernel_multiplyConjPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z11kernel_copyPfS_i
.globl _Z11kernel_copyPfS_i
.p2align 8
.type _Z11kernel_copyPfS_i,@function
_Z11kernel_copyPfS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB4_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[2:3], off
s_waitcnt vmcnt(0)
global_store_b32 v[0:1], v2, off
.LBB4_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11kernel_copyPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end4:
.size _Z11kernel_copyPfS_i, .Lfunc_end4-_Z11kernel_copyPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z20kernel_divideStable2PfS_fi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z20kernel_divideStable2PfS_fi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23kernel_multiply_complexPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23kernel_multiply_complexPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z20kernel_multiply_realPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z20kernel_multiply_realPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19kernel_multiplyConjPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z19kernel_multiplyConjPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11kernel_copyPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11kernel_copyPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <iostream>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <hipfft/hipfft.h>
const int THREADS=64;
__global__ void kernel_divideStable2(float *data1, float *data2, float threadshold, int len);
__global__ void kernel_multiply_complex(float *data1, float *data2, int len);
__global__ void kernel_multiply_real(float *data1, float *data2, int len);
__global__ void kernel_multiplyConj(float *data1, float *data2, int len);
__global__ void kernel_copy(float *data1, float *data2, int len);
using namespace std;
void inv_forwardDFT(float *data, int nx, int ny, int nz){
hipfftReal *a=(hipfftReal*)data;
hipfftComplex *A=(hipfftComplex*)data;
hipfftHandle plan_forward;
if(nz==1){
hipfftPlan2d(&plan_forward, nx, ny, HIPFFT_R2C);
hipfftExecR2C(plan_forward, a, A);
}
else{
// cout<<"nx= "<<nx<<" ny= "<<ny<<" nz= "<<nz<<endl;
hipfftPlan3d(&plan_forward, nx, ny, nz, HIPFFT_R2C);
hipfftExecR2C(plan_forward, a, A);
}
hipfftDestroy(plan_forward);
}
void inv_inverseDFT(float *data, int nx, int ny, int nz){
hipfftReal *a=(hipfftReal*)data;
hipfftComplex *A=(hipfftComplex*)data;
hipfftHandle plan_backward;
if(nz==1){
hipfftPlan2d(&plan_backward, nx, ny, HIPFFT_C2R);
hipfftExecC2R(plan_backward, A, a);
}
else{
hipfftPlan3d(&plan_backward, nx, ny, nz, HIPFFT_C2R);
hipfftExecC2R(plan_backward, A, a);
}
hipfftDestroy(plan_backward);
}
// Launch kernel_divideStable2 over `len` elements, THREADS threads per block.
void inv_divideStable2(float *data1, float *data2, float threadshold, int len){
    unsigned long int blocks = (len + THREADS - 1) / THREADS;  // ceil(len/THREADS)
    kernel_divideStable2<<<blocks, THREADS>>>(data1, data2, threadshold, len);
}
// Element-wise stabilized division: data2[i] = data1[i] / data2[i] when the
// denominator is at least `threadshold`, otherwise 0.
__global__ void kernel_divideStable2(float *data1, float *data2, float threadshold, int len){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx < len){
        if(data2[idx] < threadshold){
            data2[idx] = 0.0f;                       // suppress small denominators
        }else{
            data2[idx] = data1[idx] / data2[idx];
        }
    }
}
// Launch kernel_multiply_complex; `len` counts floats, the kernel works on
// interleaved (re,im) pairs, hence len/2 elements.
void inv_multiply_complex(float * data1, float *data2, int len){
    const int pairs = len / 2;
    unsigned long int blocks = (pairs + THREADS - 1) / THREADS;  // ceil(pairs/THREADS)
    kernel_multiply_complex<<<blocks, THREADS>>>(data1, data2, pairs);
}
// In-place complex multiply of interleaved (re,im) arrays:
// data1[i] *= data2[i] over `len` complex elements.
__global__ void kernel_multiply_complex(float *data1, float *data2, int len){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx >= len) return;
    const int re = 2 * idx, im = 2 * idx + 1;
    const float a = data1[re], b = data1[im];    // left operand  a + bi
    const float c = data2[re], d = data2[im];    // right operand c + di
    data1[re] = a * c - b * d;                   // real part of (a+bi)(c+di)
    data1[im] = a * d + b * c;                   // imaginary part
}
// Launch kernel_multiply_real over `len` elements, THREADS threads per block.
void inv_multiply_real(float *data1, float *data2, int len){
    unsigned long int blocks = (len + THREADS - 1) / THREADS;  // ceil(len/THREADS)
    kernel_multiply_real<<<blocks, THREADS>>>(data1, data2, len);
}
// Element-wise in-place product: data1[i] *= data2[i].
__global__ void kernel_multiply_real(float *data1, float *data2, int len){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx < len)
        data1[idx] *= data2[idx];
}
// Launch kernel_multiplyConj; `len` counts floats, the kernel works on
// interleaved (re,im) pairs, hence len/2 elements.
void inv_multiplyConj(float *data1, float *data2, int len){
    const int pairs = len / 2;
    unsigned long int blocks = (pairs + THREADS - 1) / THREADS;  // ceil(pairs/THREADS)
    kernel_multiplyConj<<<blocks, THREADS>>>(data1, data2, pairs);
}
// In-place product of data1 with the complex conjugate of data2 over `len`
// interleaved (re,im) elements: (a+bi)(c-di) = (ac+bd) + (bc-ad)i.
__global__ void kernel_multiplyConj(float *data1, float *data2, int len){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx >= len) return;
    const int re = 2 * idx, im = 2 * idx + 1;
    const float a = data1[re], b = data1[im];    // left operand  a + bi
    const float c = data2[re], d = data2[im];    // right operand c + di (conjugated)
    data1[re] = a * c + b * d;                   // real part
    data1[im] = -a * d + b * c;                  // imaginary part
}
// Launch kernel_copy over `len` elements, THREADS threads per block.
void inv_copy(float *data1, float *data2, int len){
    unsigned long int blocks = (len + THREADS - 1) / THREADS;  // ceil(len/THREADS)
    kernel_copy<<<blocks, THREADS>>>(data1, data2, len);
}
// Element-wise copy: data1[i] = data2[i].
__global__ void kernel_copy(float *data1, float *data2, int len){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx < len)
        data1[idx] = data2[idx];
}
.file "Fourier.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
	.globl	_Z14inv_forwardDFTPfiii         # -- Begin function _Z14inv_forwardDFTPfiii
	.p2align	4, 0x90
	.type	_Z14inv_forwardDFTPfiii,@function
# void inv_forwardDFT(float *data, int nx, int ny, int nz)  [HIP host side]
# Builds a hipFFT R2C plan (2-D when nz == 1, else 3-D), runs the in-place
# transform, then destroys the plan.
# SysV AMD64: rdi = data, esi = nx, edx = ny, ecx = nz.
# The hipfftHandle lives at 8(%rsp).
_Z14inv_forwardDFTPfiii:                # @_Z14inv_forwardDFTPfiii
	.cfi_startproc
# %bb.0:
	pushq	%rbx
	.cfi_def_cfa_offset 16
	subq	$16, %rsp
	.cfi_def_cfa_offset 32
	.cfi_offset %rbx, -16
	movq	%rdi, %rbx                      # keep data pointer across calls (callee-saved)
	leaq	8(%rsp), %rdi                   # rdi = &plan
	cmpl	$1, %ecx                        # nz == 1 ? 2-D plan : 3-D plan
	jne	.LBB0_2
# %bb.1:
	movl	$42, %ecx                       # 42 = HIPFFT_R2C (matches the source)
	callq	hipfftPlan2d
	jmp	.LBB0_3
.LBB0_2:
	movl	$42, %r8d                       # 42 = HIPFFT_R2C
	callq	hipfftPlan3d
.LBB0_3:
	movq	8(%rsp), %rdi                   # plan handle
	movq	%rbx, %rsi                      # in  = data (hipfftReal*)
	movq	%rbx, %rdx                      # out = data (hipfftComplex*): in-place
	callq	hipfftExecR2C
	movq	8(%rsp), %rdi
	callq	hipfftDestroy
	addq	$16, %rsp
	.cfi_def_cfa_offset 16
	popq	%rbx
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end0:
	.size	_Z14inv_forwardDFTPfiii, .Lfunc_end0-_Z14inv_forwardDFTPfiii
	.cfi_endproc
                                        # -- End function
	.globl	_Z14inv_inverseDFTPfiii         # -- Begin function _Z14inv_inverseDFTPfiii
	.p2align	4, 0x90
	.type	_Z14inv_inverseDFTPfiii,@function
# void inv_inverseDFT(float *data, int nx, int ny, int nz)  [HIP host side]
# Mirror of inv_forwardDFT for the inverse direction: builds a hipFFT C2R
# plan (2-D when nz == 1, else 3-D), runs the in-place transform, destroys
# the plan.  SysV AMD64: rdi = data, esi = nx, edx = ny, ecx = nz.
_Z14inv_inverseDFTPfiii:                # @_Z14inv_inverseDFTPfiii
	.cfi_startproc
# %bb.0:
	pushq	%rbx
	.cfi_def_cfa_offset 16
	subq	$16, %rsp
	.cfi_def_cfa_offset 32
	.cfi_offset %rbx, -16
	movq	%rdi, %rbx                      # keep data pointer across calls
	leaq	8(%rsp), %rdi                   # rdi = &plan
	cmpl	$1, %ecx                        # nz == 1 ? 2-D plan : 3-D plan
	jne	.LBB1_2
# %bb.1:
	movl	$44, %ecx                       # 44 = HIPFFT_C2R (matches the source)
	callq	hipfftPlan2d
	jmp	.LBB1_3
.LBB1_2:
	movl	$44, %r8d                       # 44 = HIPFFT_C2R
	callq	hipfftPlan3d
.LBB1_3:
	movq	8(%rsp), %rdi                   # plan handle
	movq	%rbx, %rsi                      # in  = data (hipfftComplex*)
	movq	%rbx, %rdx                      # out = data (hipfftReal*): in-place
	callq	hipfftExecC2R
	movq	8(%rsp), %rdi
	callq	hipfftDestroy
	addq	$16, %rsp
	.cfi_def_cfa_offset 16
	popq	%rbx
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end1:
	.size	_Z14inv_inverseDFTPfiii, .Lfunc_end1-_Z14inv_inverseDFTPfiii
	.cfi_endproc
                                        # -- End function
.globl _Z17inv_divideStable2PfS_fi # -- Begin function _Z17inv_divideStable2PfS_fi
.p2align 4, 0x90
.type _Z17inv_divideStable2PfS_fi,@function
_Z17inv_divideStable2PfS_fi: # @_Z17inv_divideStable2PfS_fi
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %ebx
movss %xmm0, 4(%rsp) # 4-byte Spill
movq %rsi, %r14
movq %rdi, %r15
leal 63(%rbx), %eax
leal 126(%rbx), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $6, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $64, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movq %r15, 72(%rsp)
movq %r14, 64(%rsp)
movss 4(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 12(%rsp)
movl %ebx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z20kernel_divideStable2PfS_fi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z17inv_divideStable2PfS_fi, .Lfunc_end2-_Z17inv_divideStable2PfS_fi
.cfi_endproc
# -- End function
.globl _Z35__device_stub__kernel_divideStable2PfS_fi # -- Begin function _Z35__device_stub__kernel_divideStable2PfS_fi
.p2align 4, 0x90
.type _Z35__device_stub__kernel_divideStable2PfS_fi,@function
_Z35__device_stub__kernel_divideStable2PfS_fi: # @_Z35__device_stub__kernel_divideStable2PfS_fi
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movss %xmm0, 12(%rsp)
movl %edx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z20kernel_divideStable2PfS_fi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end3:
.size _Z35__device_stub__kernel_divideStable2PfS_fi, .Lfunc_end3-_Z35__device_stub__kernel_divideStable2PfS_fi
.cfi_endproc
# -- End function
.globl _Z20inv_multiply_complexPfS_i # -- Begin function _Z20inv_multiply_complexPfS_i
.p2align 4, 0x90
.type _Z20inv_multiply_complexPfS_i,@function
_Z20inv_multiply_complexPfS_i: # @_Z20inv_multiply_complexPfS_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %rbx
movq %rdi, %r14
movl %edx, %r15d
shrl $31, %r15d
addl %edx, %r15d
sarl %r15d
leal 63(%r15), %eax
leal 126(%r15), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $6, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $64, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_2
# %bb.1:
movq %r14, 72(%rsp)
movq %rbx, 64(%rsp)
movl %r15d, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z23kernel_multiply_complexPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z20inv_multiply_complexPfS_i, .Lfunc_end4-_Z20inv_multiply_complexPfS_i
.cfi_endproc
# -- End function
.globl _Z38__device_stub__kernel_multiply_complexPfS_i # -- Begin function _Z38__device_stub__kernel_multiply_complexPfS_i
.p2align 4, 0x90
.type _Z38__device_stub__kernel_multiply_complexPfS_i,@function
_Z38__device_stub__kernel_multiply_complexPfS_i: # @_Z38__device_stub__kernel_multiply_complexPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z23kernel_multiply_complexPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end5:
.size _Z38__device_stub__kernel_multiply_complexPfS_i, .Lfunc_end5-_Z38__device_stub__kernel_multiply_complexPfS_i
.cfi_endproc
# -- End function
.globl _Z17inv_multiply_realPfS_i # -- Begin function _Z17inv_multiply_realPfS_i
.p2align 4, 0x90
.type _Z17inv_multiply_realPfS_i,@function
_Z17inv_multiply_realPfS_i: # @_Z17inv_multiply_realPfS_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %ebx
movq %rsi, %r14
movq %rdi, %r15
leal 63(%rbx), %eax
leal 126(%rbx), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $6, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $64, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_2
# %bb.1:
movq %r15, 72(%rsp)
movq %r14, 64(%rsp)
movl %ebx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z20kernel_multiply_realPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB6_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end6:
.size _Z17inv_multiply_realPfS_i, .Lfunc_end6-_Z17inv_multiply_realPfS_i
.cfi_endproc
# -- End function
.globl _Z35__device_stub__kernel_multiply_realPfS_i # -- Begin function _Z35__device_stub__kernel_multiply_realPfS_i
.p2align 4, 0x90
.type _Z35__device_stub__kernel_multiply_realPfS_i,@function
_Z35__device_stub__kernel_multiply_realPfS_i: # @_Z35__device_stub__kernel_multiply_realPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z20kernel_multiply_realPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end7:
.size _Z35__device_stub__kernel_multiply_realPfS_i, .Lfunc_end7-_Z35__device_stub__kernel_multiply_realPfS_i
.cfi_endproc
# -- End function
.globl _Z16inv_multiplyConjPfS_i # -- Begin function _Z16inv_multiplyConjPfS_i
.p2align 4, 0x90
.type _Z16inv_multiplyConjPfS_i,@function
_Z16inv_multiplyConjPfS_i: # @_Z16inv_multiplyConjPfS_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %rbx
movq %rdi, %r14
movl %edx, %r15d
shrl $31, %r15d
addl %edx, %r15d
sarl %r15d
leal 63(%r15), %eax
leal 126(%r15), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $6, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $64, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB8_2
# %bb.1:
movq %r14, 72(%rsp)
movq %rbx, 64(%rsp)
movl %r15d, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19kernel_multiplyConjPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB8_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end8:
.size _Z16inv_multiplyConjPfS_i, .Lfunc_end8-_Z16inv_multiplyConjPfS_i
.cfi_endproc
# -- End function
.globl _Z34__device_stub__kernel_multiplyConjPfS_i # -- Begin function _Z34__device_stub__kernel_multiplyConjPfS_i
.p2align 4, 0x90
.type _Z34__device_stub__kernel_multiplyConjPfS_i,@function
_Z34__device_stub__kernel_multiplyConjPfS_i: # @_Z34__device_stub__kernel_multiplyConjPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19kernel_multiplyConjPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end9:
.size _Z34__device_stub__kernel_multiplyConjPfS_i, .Lfunc_end9-_Z34__device_stub__kernel_multiplyConjPfS_i
.cfi_endproc
# -- End function
.globl _Z8inv_copyPfS_i # -- Begin function _Z8inv_copyPfS_i
.p2align 4, 0x90
.type _Z8inv_copyPfS_i,@function
_Z8inv_copyPfS_i: # @_Z8inv_copyPfS_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %ebx
movq %rsi, %r14
movq %rdi, %r15
leal 63(%rbx), %eax
leal 126(%rbx), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $6, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $64, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB10_2
# %bb.1:
movq %r15, 72(%rsp)
movq %r14, 64(%rsp)
movl %ebx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11kernel_copyPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB10_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end10:
.size _Z8inv_copyPfS_i, .Lfunc_end10-_Z8inv_copyPfS_i
.cfi_endproc
# -- End function
.globl _Z26__device_stub__kernel_copyPfS_i # -- Begin function _Z26__device_stub__kernel_copyPfS_i
.p2align 4, 0x90
.type _Z26__device_stub__kernel_copyPfS_i,@function
_Z26__device_stub__kernel_copyPfS_i: # @_Z26__device_stub__kernel_copyPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11kernel_copyPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end11:
.size _Z26__device_stub__kernel_copyPfS_i, .Lfunc_end11-_Z26__device_stub__kernel_copyPfS_i
.cfi_endproc
# -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
# Module constructor (run from .init_array): registers the fat binary once
# (guarded by __hip_gpubin_handle) and then registers each of the five
# kernels with the HIP runtime, finally installing __hip_module_dtor via
# atexit as a tail call.
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	pushq	%rbx
	.cfi_def_cfa_offset 16
	subq	$32, %rsp
	.cfi_def_cfa_offset 48
	.cfi_offset %rbx, -16
	cmpq	$0, __hip_gpubin_handle(%rip)   # fat binary already registered?
	jne	.LBB12_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip) # cache the handle
.LBB12_2:
	movq	__hip_gpubin_handle(%rip), %rbx # rbx = handle, reused for all registrations
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)                 # zero the four optional out-parameters
	movups	%xmm0, (%rsp)
	movl	$_Z20kernel_divideStable2PfS_fi, %esi   # host stub address
	movl	$.L__unnamed_1, %edx            # device-side name string
	movl	$.L__unnamed_1, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d                       # thread limit: unlimited
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z23kernel_multiply_complexPfS_i, %esi
	movl	$.L__unnamed_2, %edx
	movl	$.L__unnamed_2, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z20kernel_multiply_realPfS_i, %esi
	movl	$.L__unnamed_3, %edx
	movl	$.L__unnamed_3, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z19kernel_multiplyConjPfS_i, %esi
	movl	$.L__unnamed_4, %edx
	movl	$.L__unnamed_4, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)
	movups	%xmm0, (%rsp)
	movl	$_Z11kernel_copyPfS_i, %esi
	movl	$.L__unnamed_5, %edx
	movl	$.L__unnamed_5, %ecx
	movq	%rbx, %rdi
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi        # register cleanup at exit
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	popq	%rbx
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end12:
	.size	__hip_module_ctor, .Lfunc_end12-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
# atexit handler: unregisters the fat binary if it was ever registered and
# clears the cached handle so the destructor is idempotent.
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi                      # nothing to do if never registered
	je	.LBB13_2
# %bb.1:
	pushq	%rax                            # realign stack to 16 for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)   # mark as unregistered
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB13_2:
	retq
.Lfunc_end13:
	.size	__hip_module_dtor, .Lfunc_end13-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type _Z20kernel_divideStable2PfS_fi,@object # @_Z20kernel_divideStable2PfS_fi
.section .rodata,"a",@progbits
.globl _Z20kernel_divideStable2PfS_fi
.p2align 3, 0x0
_Z20kernel_divideStable2PfS_fi:
.quad _Z35__device_stub__kernel_divideStable2PfS_fi
.size _Z20kernel_divideStable2PfS_fi, 8
.type _Z23kernel_multiply_complexPfS_i,@object # @_Z23kernel_multiply_complexPfS_i
.globl _Z23kernel_multiply_complexPfS_i
.p2align 3, 0x0
_Z23kernel_multiply_complexPfS_i:
.quad _Z38__device_stub__kernel_multiply_complexPfS_i
.size _Z23kernel_multiply_complexPfS_i, 8
.type _Z20kernel_multiply_realPfS_i,@object # @_Z20kernel_multiply_realPfS_i
.globl _Z20kernel_multiply_realPfS_i
.p2align 3, 0x0
_Z20kernel_multiply_realPfS_i:
.quad _Z35__device_stub__kernel_multiply_realPfS_i
.size _Z20kernel_multiply_realPfS_i, 8
.type _Z19kernel_multiplyConjPfS_i,@object # @_Z19kernel_multiplyConjPfS_i
.globl _Z19kernel_multiplyConjPfS_i
.p2align 3, 0x0
_Z19kernel_multiplyConjPfS_i:
.quad _Z34__device_stub__kernel_multiplyConjPfS_i
.size _Z19kernel_multiplyConjPfS_i, 8
.type _Z11kernel_copyPfS_i,@object # @_Z11kernel_copyPfS_i
.globl _Z11kernel_copyPfS_i
.p2align 3, 0x0
_Z11kernel_copyPfS_i:
.quad _Z26__device_stub__kernel_copyPfS_i
.size _Z11kernel_copyPfS_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z20kernel_divideStable2PfS_fi"
.size .L__unnamed_1, 31
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z23kernel_multiply_complexPfS_i"
.size .L__unnamed_2, 33
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z20kernel_multiply_realPfS_i"
.size .L__unnamed_3, 30
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z19kernel_multiplyConjPfS_i"
.size .L__unnamed_4, 29
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "_Z11kernel_copyPfS_i"
.size .L__unnamed_5, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z35__device_stub__kernel_divideStable2PfS_fi
.addrsig_sym _Z38__device_stub__kernel_multiply_complexPfS_i
.addrsig_sym _Z35__device_stub__kernel_multiply_realPfS_i
.addrsig_sym _Z34__device_stub__kernel_multiplyConjPfS_i
.addrsig_sym _Z26__device_stub__kernel_copyPfS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z20kernel_divideStable2PfS_fi
.addrsig_sym _Z23kernel_multiply_complexPfS_i
.addrsig_sym _Z20kernel_multiply_realPfS_i
.addrsig_sym _Z19kernel_multiplyConjPfS_i
.addrsig_sym _Z11kernel_copyPfS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00049d04_00000000-6_Fourier.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
# static void __cudaUnregisterBinaryUtil(): exit-time hook that hands the
# cached fatbin handle back to the CUDA runtime.
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3701:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                        # realign stack to 16 for the call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE3701:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
	.globl	_Z14inv_forwardDFTPfiii
	.type	_Z14inv_forwardDFTPfiii, @function
# void inv_forwardDFT(float *data, int nx, int ny, int nz)  [CUDA host side]
# Builds a cuFFT R2C plan (2-D when nz == 1, else 3-D), executes the
# in-place transform, destroys the plan.  GCC keeps the cufftHandle at
# 4(%rsp) and protects the frame with a stack canary at 8(%rsp).
_Z14inv_forwardDFTPfiii:
.LFB3692:
	.cfi_startproc
	endbr64
	pushq	%rbx
	.cfi_def_cfa_offset 16
	.cfi_offset 3, -16
	subq	$16, %rsp
	.cfi_def_cfa_offset 32
	movq	%rdi, %rbx                      # keep data pointer across calls
	movq	%fs:40, %rax                    # load stack-guard canary
	movq	%rax, 8(%rsp)
	xorl	%eax, %eax
	cmpl	$1, %ecx                        # nz == 1 ? 2-D plan : 3-D plan
	je	.L8
	leaq	4(%rsp), %rdi                   # rdi = &plan
	movl	$42, %r8d                       # 42 = CUFFT_R2C (matches the source)
	call	cufftPlan3d@PLT
	movq	%rbx, %rdx                      # out = data: in-place
	movq	%rbx, %rsi                      # in  = data
	movl	4(%rsp), %edi                   # 32-bit plan handle
	call	cufftExecR2C@PLT
.L5:
	movl	4(%rsp), %edi
	call	cufftDestroy@PLT
	movq	8(%rsp), %rax                   # verify canary before returning
	subq	%fs:40, %rax
	jne	.L9
	addq	$16, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 16
	popq	%rbx
	.cfi_def_cfa_offset 8
	ret
.L8:
	.cfi_restore_state
	leaq	4(%rsp), %rdi                   # rdi = &plan (2-D path)
	movl	$42, %ecx                       # 42 = CUFFT_R2C
	call	cufftPlan2d@PLT
	movq	%rbx, %rdx
	movq	%rbx, %rsi
	movl	4(%rsp), %edi
	call	cufftExecR2C@PLT
	jmp	.L5
.L9:
	call	__stack_chk_fail@PLT            # canary mismatch: abort
	.cfi_endproc
.LFE3692:
	.size	_Z14inv_forwardDFTPfiii, .-_Z14inv_forwardDFTPfiii
.globl _Z14inv_inverseDFTPfiii
.type _Z14inv_inverseDFTPfiii, @function
_Z14inv_inverseDFTPfiii:
.LFB3693:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $16, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
cmpl $1, %ecx
je .L15
leaq 4(%rsp), %rdi
movl $44, %r8d
call cufftPlan3d@PLT
movq %rbx, %rdx
movq %rbx, %rsi
movl 4(%rsp), %edi
call cufftExecC2R@PLT
.L12:
movl 4(%rsp), %edi
call cufftDestroy@PLT
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
leaq 4(%rsp), %rdi
movl $44, %ecx
call cufftPlan2d@PLT
movq %rbx, %rdx
movq %rbx, %rsi
movl 4(%rsp), %edi
call cufftExecC2R@PLT
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3693:
.size _Z14inv_inverseDFTPfiii, .-_Z14inv_inverseDFTPfiii
.globl _Z44__device_stub__Z20kernel_divideStable2PfS_fiPfS_fi
.type _Z44__device_stub__Z20kernel_divideStable2PfS_fiPfS_fi, @function
_Z44__device_stub__Z20kernel_divideStable2PfS_fiPfS_fi:
.LFB3723:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movss %xmm0, 12(%rsp)
movl %edx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L21
.L17:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L22
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z20kernel_divideStable2PfS_fi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L17
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3723:
.size _Z44__device_stub__Z20kernel_divideStable2PfS_fiPfS_fi, .-_Z44__device_stub__Z20kernel_divideStable2PfS_fiPfS_fi
	.globl	_Z20kernel_divideStable2PfS_fi
	.type	_Z20kernel_divideStable2PfS_fi, @function
# Host-side trampoline bearing the kernel's mangled name: forwards to the
# device stub, which pops the pushed launch configuration and calls
# cudaLaunchKernel.
_Z20kernel_divideStable2PfS_fi:
.LFB3724:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                        # realign stack to 16 for the call
	.cfi_def_cfa_offset 16
	call	_Z44__device_stub__Z20kernel_divideStable2PfS_fiPfS_fi
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE3724:
	.size	_Z20kernel_divideStable2PfS_fi, .-_Z20kernel_divideStable2PfS_fi
.globl _Z17inv_divideStable2PfS_fi
.type _Z17inv_divideStable2PfS_fi, @function
_Z17inv_divideStable2PfS_fi:
.LFB3694:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $48, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %rbp
movq %rsi, %r12
movss %xmm0, 12(%rsp)
movl %edx, %ebx
movl $64, 36(%rsp)
movl $1, 40(%rsp)
leal 126(%rdx), %eax
addl $63, %edx
cmovns %edx, %eax
sarl $6, %eax
movl %eax, 24(%rsp)
movl $1, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L28
.L25:
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
movl %ebx, %edx
movss 12(%rsp), %xmm0
movq %r12, %rsi
movq %rbp, %rdi
call _Z44__device_stub__Z20kernel_divideStable2PfS_fiPfS_fi
jmp .L25
.cfi_endproc
.LFE3694:
.size _Z17inv_divideStable2PfS_fi, .-_Z17inv_divideStable2PfS_fi
.globl _Z46__device_stub__Z23kernel_multiply_complexPfS_iPfS_i
.type _Z46__device_stub__Z23kernel_multiply_complexPfS_iPfS_i, @function
_Z46__device_stub__Z23kernel_multiply_complexPfS_iPfS_i:
.LFB3725:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z23kernel_multiply_complexPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3725:
.size _Z46__device_stub__Z23kernel_multiply_complexPfS_iPfS_i, .-_Z46__device_stub__Z23kernel_multiply_complexPfS_iPfS_i
	.globl	_Z23kernel_multiply_complexPfS_i
	.type	_Z23kernel_multiply_complexPfS_i, @function
# Host-side trampoline bearing the kernel's mangled name: forwards to the
# device stub, which pops the pushed launch configuration and calls
# cudaLaunchKernel.
_Z23kernel_multiply_complexPfS_i:
.LFB3726:
	.cfi_startproc
	endbr64
	subq	$8, %rsp                        # realign stack to 16 for the call
	.cfi_def_cfa_offset 16
	call	_Z46__device_stub__Z23kernel_multiply_complexPfS_iPfS_i
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE3726:
	.size	_Z23kernel_multiply_complexPfS_i, .-_Z23kernel_multiply_complexPfS_i
# ---------------------------------------------------------------------------
# inv_multiply_complex(float*, float*, int n)
# Host launcher: halves n (signed n/2 via the shr/lea/sar idiom), pushes a
# launch configuration of 64-thread blocks with grid = ceil((n/2)/64)
# (the lea/cmovns/sar sequence is signed round-up division by 64), then
# calls the device stub with (a, b, n/2) if the push succeeded.
# ---------------------------------------------------------------------------
.globl _Z20inv_multiply_complexPfS_i
.type _Z20inv_multiply_complexPfS_i, @function
_Z20inv_multiply_complexPfS_i:
.LFB3695:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbp                         # save a
movq %rsi, %r12                         # save b
movl %edx, %eax                         # ebx = n / 2 (signed)
shrl $31, %eax
leal (%rax,%rdx), %ebx
sarl %ebx
movl $64, 20(%rsp)                      # block = 64x1
movl $1, 24(%rsp)
leal 126(%rbx), %eax                    # grid = ceil((n/2)/64), signed
movl %ebx, %edx
addl $63, %edx
cmovns %edx, %eax
sarl $6, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d                           # stream = 0
movl $0, %r8d                           # shared mem = 0
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L40
.L37:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L40:
.cfi_restore_state
movl %ebx, %edx                         # launch with (a, b, n/2)
movq %r12, %rsi
movq %rbp, %rdi
call _Z46__device_stub__Z23kernel_multiply_complexPfS_iPfS_i
jmp .L37
.cfi_endproc
.LFE3695:
.size _Z20inv_multiply_complexPfS_i, .-_Z20inv_multiply_complexPfS_i
# ---------------------------------------------------------------------------
# Launch stub for kernel_multiply_real(float*, float*, int): same pattern
# as the multiply_complex stub above — spill args, build arg-pointer array,
# pop the call configuration and forward it to cudaLaunchKernel.
# ---------------------------------------------------------------------------
.globl _Z43__device_stub__Z20kernel_multiply_realPfS_iPfS_i
.type _Z43__device_stub__Z20kernel_multiply_realPfS_iPfS_i, @function
_Z43__device_stub__Z20kernel_multiply_realPfS_iPfS_i:
.LFB3727:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax                       # stack canary
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax                     # args[] = { &arg0, &arg1, &arg2 }
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L45
.L41:
movq 120(%rsp), %rax                    # canary check
subq %fs:40, %rax
jne .L46
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L45:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z20kernel_multiply_realPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L41
.L46:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3727:
.size _Z43__device_stub__Z20kernel_multiply_realPfS_iPfS_i, .-_Z43__device_stub__Z20kernel_multiply_realPfS_iPfS_i
# Trampoline: host-visible kernel symbol forwarding to the stub.
.globl _Z20kernel_multiply_realPfS_i
.type _Z20kernel_multiply_realPfS_i, @function
_Z20kernel_multiply_realPfS_i:
.LFB3728:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z43__device_stub__Z20kernel_multiply_realPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3728:
.size _Z20kernel_multiply_realPfS_i, .-_Z20kernel_multiply_realPfS_i
# ---------------------------------------------------------------------------
# inv_multiply_real(float*, float*, int n)
# Host launcher: 64-thread blocks, grid = ceil(n/64) (signed round-up
# division by 64 via the lea/cmovns/sar sequence); n is passed unchanged.
# ---------------------------------------------------------------------------
.globl _Z17inv_multiply_realPfS_i
.type _Z17inv_multiply_realPfS_i, @function
_Z17inv_multiply_realPfS_i:
.LFB3696:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbp
movq %rsi, %r12
movl %edx, %ebx
movl $64, 20(%rsp)                      # block = 64x1
movl $1, 24(%rsp)
leal 126(%rdx), %eax                    # grid = ceil(n/64), signed
addl $63, %edx
cmovns %edx, %eax
sarl $6, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L52
.L49:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L52:
.cfi_restore_state
movl %ebx, %edx
movq %r12, %rsi
movq %rbp, %rdi
call _Z43__device_stub__Z20kernel_multiply_realPfS_iPfS_i
jmp .L49
.cfi_endproc
.LFE3696:
.size _Z17inv_multiply_realPfS_i, .-_Z17inv_multiply_realPfS_i
# ---------------------------------------------------------------------------
# Launch stub for kernel_multiplyConj(float*, float*, int): identical
# pattern to the stubs above (spill args, build arg-pointer array, pop the
# launch configuration, forward to cudaLaunchKernel).
# ---------------------------------------------------------------------------
.globl _Z42__device_stub__Z19kernel_multiplyConjPfS_iPfS_i
.type _Z42__device_stub__Z19kernel_multiplyConjPfS_iPfS_i, @function
_Z42__device_stub__Z19kernel_multiplyConjPfS_iPfS_i:
.LFB3729:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax                       # stack canary
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax                     # args[] = { &arg0, &arg1, &arg2 }
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L57
.L53:
movq 120(%rsp), %rax                    # canary check
subq %fs:40, %rax
jne .L58
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L57:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19kernel_multiplyConjPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L53
.L58:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3729:
.size _Z42__device_stub__Z19kernel_multiplyConjPfS_iPfS_i, .-_Z42__device_stub__Z19kernel_multiplyConjPfS_iPfS_i
# Trampoline: host-visible kernel symbol forwarding to the stub.
.globl _Z19kernel_multiplyConjPfS_i
.type _Z19kernel_multiplyConjPfS_i, @function
_Z19kernel_multiplyConjPfS_i:
.LFB3730:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z19kernel_multiplyConjPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3730:
.size _Z19kernel_multiplyConjPfS_i, .-_Z19kernel_multiplyConjPfS_i
# ---------------------------------------------------------------------------
# inv_multiplyConj(float*, float*, int n)
# Host launcher: n is halved (signed n/2), blocks of 64 threads,
# grid = ceil((n/2)/64); same configuration as inv_multiply_complex.
# ---------------------------------------------------------------------------
.globl _Z16inv_multiplyConjPfS_i
.type _Z16inv_multiplyConjPfS_i, @function
_Z16inv_multiplyConjPfS_i:
.LFB3697:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbp
movq %rsi, %r12
movl %edx, %eax                         # ebx = n / 2 (signed)
shrl $31, %eax
leal (%rax,%rdx), %ebx
sarl %ebx
movl $64, 20(%rsp)
movl $1, 24(%rsp)
leal 126(%rbx), %eax                    # grid = ceil((n/2)/64), signed
movl %ebx, %edx
addl $63, %edx
cmovns %edx, %eax
sarl $6, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L64
.L61:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L64:
.cfi_restore_state
movl %ebx, %edx
movq %r12, %rsi
movq %rbp, %rdi
call _Z42__device_stub__Z19kernel_multiplyConjPfS_iPfS_i
jmp .L61
.cfi_endproc
.LFE3697:
.size _Z16inv_multiplyConjPfS_i, .-_Z16inv_multiplyConjPfS_i
# ---------------------------------------------------------------------------
# Launch stub for kernel_copy(float*, float*, int): same generated pattern
# as the other stubs in this file.
# ---------------------------------------------------------------------------
.globl _Z34__device_stub__Z11kernel_copyPfS_iPfS_i
.type _Z34__device_stub__Z11kernel_copyPfS_iPfS_i, @function
_Z34__device_stub__Z11kernel_copyPfS_iPfS_i:
.LFB3731:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax                       # stack canary
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax                     # args[] = { &arg0, &arg1, &arg2 }
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L69
.L65:
movq 120(%rsp), %rax                    # canary check
subq %fs:40, %rax
jne .L70
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L69:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11kernel_copyPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L65
.L70:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3731:
.size _Z34__device_stub__Z11kernel_copyPfS_iPfS_i, .-_Z34__device_stub__Z11kernel_copyPfS_iPfS_i
# Trampoline: host-visible kernel symbol forwarding to the stub.
.globl _Z11kernel_copyPfS_i
.type _Z11kernel_copyPfS_i, @function
_Z11kernel_copyPfS_i:
.LFB3732:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z34__device_stub__Z11kernel_copyPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3732:
.size _Z11kernel_copyPfS_i, .-_Z11kernel_copyPfS_i
# ---------------------------------------------------------------------------
# inv_copy(float*, float*, int n)
# Host launcher: 64-thread blocks, grid = ceil(n/64); n passed unchanged.
# ---------------------------------------------------------------------------
.globl _Z8inv_copyPfS_i
.type _Z8inv_copyPfS_i, @function
_Z8inv_copyPfS_i:
.LFB3698:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbp
movq %rsi, %r12
movl %edx, %ebx
movl $64, 20(%rsp)                      # block = 64x1
movl $1, 24(%rsp)
leal 126(%rdx), %eax                    # grid = ceil(n/64), signed
addl $63, %edx
cmovns %edx, %eax
sarl $6, %eax
movl %eax, 8(%rsp)
movl $1, 12(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L76
.L73:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L76:
.cfi_restore_state
movl %ebx, %edx
movq %r12, %rsi
movq %rbp, %rdi
call _Z34__device_stub__Z11kernel_copyPfS_iPfS_i
jmp .L73
.cfi_endproc
.LFE3698:
.size _Z8inv_copyPfS_i, .-_Z8inv_copyPfS_i
# ---------------------------------------------------------------------------
# Mangled kernel-name strings used when registering each kernel with the
# CUDA runtime (must match the device-side symbol names byte-for-byte).
# ---------------------------------------------------------------------------
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z11kernel_copyPfS_i"
.LC1:
.string "_Z19kernel_multiplyConjPfS_i"
.LC2:
.string "_Z20kernel_multiply_realPfS_i"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "_Z23kernel_multiply_complexPfS_i"
.align 8
.LC4:
.string "_Z20kernel_divideStable2PfS_fi"
.text
# ---------------------------------------------------------------------------
# Static constructor (run via .init_array before main): registers the
# embedded fat binary with the CUDA runtime, then registers each of the
# five kernels by (host trampoline address, mangled name), finishes the
# registration, and arranges unregistration at exit via atexit.
# ---------------------------------------------------------------------------
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3734:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx                         # keep the module handle in rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z11kernel_copyPfS_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z19kernel_multiplyConjPfS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z20kernel_multiply_realPfS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z23kernel_multiply_complexPfS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z20kernel_divideStable2PfS_fi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3734:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
# Register the constructor above to run at program startup.
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
# Fat-binary descriptor: magic 0x466243b1, version 1, pointer to the
# embedded device code (fatbinData defined elsewhere).
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
# GNU property note advertising CET/IBT+SHSTK support (endbr64 landing pads).
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "Fourier.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
# ---------------------------------------------------------------------------
# inv_forwardDFT(float*, int, int, int)
# Creates a hipFFT real-to-complex plan (2-D or 3-D depending on the 4th
# argument; 42 is the hipfft type constant passed by the caller's source,
# presumably HIPFFT_R2C — confirm against hipfft headers), executes it
# in-place on the buffer, and destroys the plan.
# ---------------------------------------------------------------------------
.globl _Z14inv_forwardDFTPfiii # -- Begin function _Z14inv_forwardDFTPfiii
.p2align 4, 0x90
.type _Z14inv_forwardDFTPfiii,@function
_Z14inv_forwardDFTPfiii: # @_Z14inv_forwardDFTPfiii
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $16, %rsp
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -16
movq %rdi, %rbx                         # save data pointer
leaq 8(%rsp), %rdi                      # &plan
cmpl $1, %ecx
jne .LBB0_2
# %bb.1:
movl $42, %ecx                          # 2-D plan, transform type 42
callq hipfftPlan2d
jmp .LBB0_3
.LBB0_2:
movl $42, %r8d                          # 3-D plan, transform type 42
callq hipfftPlan3d
.LBB0_3:
movq 8(%rsp), %rdi
movq %rbx, %rsi                         # in-place: input == output
movq %rbx, %rdx
callq hipfftExecR2C
movq 8(%rsp), %rdi
callq hipfftDestroy
addq $16, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z14inv_forwardDFTPfiii, .Lfunc_end0-_Z14inv_forwardDFTPfiii
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# inv_inverseDFT(float*, int, int, int)
# Mirror of inv_forwardDFT using transform type 44 and hipfftExecC2R
# (complex-to-real), in place.
# ---------------------------------------------------------------------------
.globl _Z14inv_inverseDFTPfiii # -- Begin function _Z14inv_inverseDFTPfiii
.p2align 4, 0x90
.type _Z14inv_inverseDFTPfiii,@function
_Z14inv_inverseDFTPfiii: # @_Z14inv_inverseDFTPfiii
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $16, %rsp
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -16
movq %rdi, %rbx
leaq 8(%rsp), %rdi
cmpl $1, %ecx
jne .LBB1_2
# %bb.1:
movl $44, %ecx
callq hipfftPlan2d
jmp .LBB1_3
.LBB1_2:
movl $44, %r8d
callq hipfftPlan3d
.LBB1_3:
movq 8(%rsp), %rdi
movq %rbx, %rsi
movq %rbx, %rdx
callq hipfftExecC2R
movq 8(%rsp), %rdi
callq hipfftDestroy
addq $16, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z14inv_inverseDFTPfiii, .Lfunc_end1-_Z14inv_inverseDFTPfiii
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# inv_divideStable2(float*, float*, float, int n)
# HIP host launcher: pushes a configuration of 64-thread blocks with
# grid = ceil(n/64) (signed round-up division via lea/cmovns/sar; grid and
# block dims are packed as 64-bit x|y pairs), then pops it and launches
# kernel_divideStable2 through hipLaunchKernel.
# ---------------------------------------------------------------------------
.globl _Z17inv_divideStable2PfS_fi # -- Begin function _Z17inv_divideStable2PfS_fi
.p2align 4, 0x90
.type _Z17inv_divideStable2PfS_fi,@function
_Z17inv_divideStable2PfS_fi: # @_Z17inv_divideStable2PfS_fi
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %ebx                         # save n
movss %xmm0, 4(%rsp) # 4-byte Spill
movq %rsi, %r14
movq %rdi, %r15
leal 63(%rbx), %eax                     # grid.x = ceil(n/64), signed
leal 126(%rbx), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $6, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi                          # pack grid = (x, y=1)
orq $64, %rdx                           # pack block = (64, 1)
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d                         # shared mem = 0
xorl %r9d, %r9d                         # stream = 0
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movq %r15, 72(%rsp)                     # spill kernel args
movq %r14, 64(%rsp)
movss 4(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 12(%rsp)
movl %ebx, 8(%rsp)
leaq 72(%rsp), %rax                     # args[] = four pointers
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z20kernel_divideStable2PfS_fi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z17inv_divideStable2PfS_fi, .Lfunc_end2-_Z17inv_divideStable2PfS_fi
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# Device stub for kernel_divideStable2(float*, float*, float, int):
# spills the arguments, builds the argument-pointer array, pops the call
# configuration and forwards it to hipLaunchKernel.
# ---------------------------------------------------------------------------
.globl _Z35__device_stub__kernel_divideStable2PfS_fi # -- Begin function _Z35__device_stub__kernel_divideStable2PfS_fi
.p2align 4, 0x90
.type _Z35__device_stub__kernel_divideStable2PfS_fi,@function
_Z35__device_stub__kernel_divideStable2PfS_fi: # @_Z35__device_stub__kernel_divideStable2PfS_fi
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movss %xmm0, 12(%rsp)
movl %edx, 8(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z20kernel_divideStable2PfS_fi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end3:
.size _Z35__device_stub__kernel_divideStable2PfS_fi, .Lfunc_end3-_Z35__device_stub__kernel_divideStable2PfS_fi
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# inv_multiply_complex(float*, float*, int n) — HIP version.
# Halves n (signed n/2 via shr/add/sar), pushes a configuration of
# 64-thread blocks with grid = ceil((n/2)/64), then pops it and launches
# kernel_multiply_complex with (a, b, n/2) through hipLaunchKernel.
# ---------------------------------------------------------------------------
.globl _Z20inv_multiply_complexPfS_i # -- Begin function _Z20inv_multiply_complexPfS_i
.p2align 4, 0x90
.type _Z20inv_multiply_complexPfS_i,@function
_Z20inv_multiply_complexPfS_i: # @_Z20inv_multiply_complexPfS_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %rbx
movq %rdi, %r14
movl %edx, %r15d                        # r15d = n / 2 (signed)
shrl $31, %r15d
addl %edx, %r15d
sarl %r15d
leal 63(%r15), %eax                     # grid = ceil((n/2)/64), signed
leal 126(%r15), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $6, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi                          # pack grid = (x, y=1)
orq $64, %rdx                           # pack block = (64, 1)
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_2
# %bb.1:
movq %r14, 72(%rsp)                     # spill kernel args (a, b, n/2)
movq %rbx, 64(%rsp)
movl %r15d, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z23kernel_multiply_complexPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z20inv_multiply_complexPfS_i, .Lfunc_end4-_Z20inv_multiply_complexPfS_i
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# Device stub for kernel_multiply_complex(float*, float*, int).
# ---------------------------------------------------------------------------
.globl _Z38__device_stub__kernel_multiply_complexPfS_i # -- Begin function _Z38__device_stub__kernel_multiply_complexPfS_i
.p2align 4, 0x90
.type _Z38__device_stub__kernel_multiply_complexPfS_i,@function
_Z38__device_stub__kernel_multiply_complexPfS_i: # @_Z38__device_stub__kernel_multiply_complexPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z23kernel_multiply_complexPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end5:
.size _Z38__device_stub__kernel_multiply_complexPfS_i, .Lfunc_end5-_Z38__device_stub__kernel_multiply_complexPfS_i
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# inv_multiply_real(float*, float*, int n) — HIP version.
# 64-thread blocks, grid = ceil(n/64); n passed to the kernel unchanged.
# ---------------------------------------------------------------------------
.globl _Z17inv_multiply_realPfS_i # -- Begin function _Z17inv_multiply_realPfS_i
.p2align 4, 0x90
.type _Z17inv_multiply_realPfS_i,@function
_Z17inv_multiply_realPfS_i: # @_Z17inv_multiply_realPfS_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %ebx                         # save n
movq %rsi, %r14
movq %rdi, %r15
leal 63(%rbx), %eax                     # grid = ceil(n/64), signed
leal 126(%rbx), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $6, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi                          # pack grid = (x, y=1)
orq $64, %rdx                           # pack block = (64, 1)
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_2
# %bb.1:
movq %r15, 72(%rsp)                     # spill kernel args
movq %r14, 64(%rsp)
movl %ebx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z20kernel_multiply_realPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB6_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end6:
.size _Z17inv_multiply_realPfS_i, .Lfunc_end6-_Z17inv_multiply_realPfS_i
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# Device stub for kernel_multiply_real(float*, float*, int).
# ---------------------------------------------------------------------------
.globl _Z35__device_stub__kernel_multiply_realPfS_i # -- Begin function _Z35__device_stub__kernel_multiply_realPfS_i
.p2align 4, 0x90
.type _Z35__device_stub__kernel_multiply_realPfS_i,@function
_Z35__device_stub__kernel_multiply_realPfS_i: # @_Z35__device_stub__kernel_multiply_realPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z20kernel_multiply_realPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end7:
.size _Z35__device_stub__kernel_multiply_realPfS_i, .Lfunc_end7-_Z35__device_stub__kernel_multiply_realPfS_i
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# inv_multiplyConj(float*, float*, int n) — HIP version.
# Halves n (signed n/2), 64-thread blocks, grid = ceil((n/2)/64); launches
# kernel_multiplyConj with (a, b, n/2).
# ---------------------------------------------------------------------------
.globl _Z16inv_multiplyConjPfS_i # -- Begin function _Z16inv_multiplyConjPfS_i
.p2align 4, 0x90
.type _Z16inv_multiplyConjPfS_i,@function
_Z16inv_multiplyConjPfS_i: # @_Z16inv_multiplyConjPfS_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %rbx
movq %rdi, %r14
movl %edx, %r15d                        # r15d = n / 2 (signed)
shrl $31, %r15d
addl %edx, %r15d
sarl %r15d
leal 63(%r15), %eax                     # grid = ceil((n/2)/64), signed
leal 126(%r15), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $6, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $64, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB8_2
# %bb.1:
movq %r14, 72(%rsp)                     # spill kernel args (a, b, n/2)
movq %rbx, 64(%rsp)
movl %r15d, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19kernel_multiplyConjPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB8_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end8:
.size _Z16inv_multiplyConjPfS_i, .Lfunc_end8-_Z16inv_multiplyConjPfS_i
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# Device stub for kernel_multiplyConj(float*, float*, int).
# ---------------------------------------------------------------------------
.globl _Z34__device_stub__kernel_multiplyConjPfS_i # -- Begin function _Z34__device_stub__kernel_multiplyConjPfS_i
.p2align 4, 0x90
.type _Z34__device_stub__kernel_multiplyConjPfS_i,@function
_Z34__device_stub__kernel_multiplyConjPfS_i: # @_Z34__device_stub__kernel_multiplyConjPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19kernel_multiplyConjPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end9:
.size _Z34__device_stub__kernel_multiplyConjPfS_i, .Lfunc_end9-_Z34__device_stub__kernel_multiplyConjPfS_i
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# inv_copy(float*, float*, int n) — HIP version.
# 64-thread blocks, grid = ceil(n/64); launches kernel_copy with n
# unchanged.
# ---------------------------------------------------------------------------
.globl _Z8inv_copyPfS_i # -- Begin function _Z8inv_copyPfS_i
.p2align 4, 0x90
.type _Z8inv_copyPfS_i,@function
_Z8inv_copyPfS_i: # @_Z8inv_copyPfS_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %ebx                         # save n
movq %rsi, %r14
movq %rdi, %r15
leal 63(%rbx), %eax                     # grid = ceil(n/64), signed
leal 126(%rbx), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $6, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $64, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB10_2
# %bb.1:
movq %r15, 72(%rsp)                     # spill kernel args
movq %r14, 64(%rsp)
movl %ebx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11kernel_copyPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB10_2:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end10:
.size _Z8inv_copyPfS_i, .Lfunc_end10-_Z8inv_copyPfS_i
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# Device stub for kernel_copy(float*, float*, int).
# ---------------------------------------------------------------------------
.globl _Z26__device_stub__kernel_copyPfS_i # -- Begin function _Z26__device_stub__kernel_copyPfS_i
.p2align 4, 0x90
.type _Z26__device_stub__kernel_copyPfS_i,@function
_Z26__device_stub__kernel_copyPfS_i: # @_Z26__device_stub__kernel_copyPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11kernel_copyPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end11:
.size _Z26__device_stub__kernel_copyPfS_i, .Lfunc_end11-_Z26__device_stub__kernel_copyPfS_i
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# __hip_module_ctor: run from .init_array.  Registers the embedded HIP fat
# binary once (guarded by __hip_gpubin_handle), registers the five kernels
# by (handle-object address, mangled name), and tail-calls atexit to
# schedule __hip_module_dtor.
# ---------------------------------------------------------------------------
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)      # already registered?
jne .LBB12_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB12_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0                      # zero the 32-byte stack-arg area
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z20kernel_divideStable2PfS_fi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23kernel_multiply_complexPfS_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z20kernel_multiply_realPfS_i, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19kernel_multiplyConjPfS_i, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11kernel_copyPfS_i, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end12:
.size __hip_module_ctor, .Lfunc_end12-__hip_module_ctor
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# __hip_module_dtor: unregisters the fat binary (if registered) and clears
# the handle.  Installed via atexit by the constructor above.
# ---------------------------------------------------------------------------
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB13_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB13_2:
retq
.Lfunc_end13:
.size __hip_module_dtor, .Lfunc_end13-__hip_module_dtor
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# HIP kernel "handle" objects: each public kernel symbol is a data object
# holding the address of its host-side device stub.  These addresses are
# what the runtime registration (ctor above) and launches refer to.
# ---------------------------------------------------------------------------
.type _Z20kernel_divideStable2PfS_fi,@object # @_Z20kernel_divideStable2PfS_fi
.section .rodata,"a",@progbits
.globl _Z20kernel_divideStable2PfS_fi
.p2align 3, 0x0
_Z20kernel_divideStable2PfS_fi:
.quad _Z35__device_stub__kernel_divideStable2PfS_fi
.size _Z20kernel_divideStable2PfS_fi, 8
.type _Z23kernel_multiply_complexPfS_i,@object # @_Z23kernel_multiply_complexPfS_i
.globl _Z23kernel_multiply_complexPfS_i
.p2align 3, 0x0
_Z23kernel_multiply_complexPfS_i:
.quad _Z38__device_stub__kernel_multiply_complexPfS_i
.size _Z23kernel_multiply_complexPfS_i, 8
.type _Z20kernel_multiply_realPfS_i,@object # @_Z20kernel_multiply_realPfS_i
.globl _Z20kernel_multiply_realPfS_i
.p2align 3, 0x0
_Z20kernel_multiply_realPfS_i:
.quad _Z35__device_stub__kernel_multiply_realPfS_i
.size _Z20kernel_multiply_realPfS_i, 8
.type _Z19kernel_multiplyConjPfS_i,@object # @_Z19kernel_multiplyConjPfS_i
.globl _Z19kernel_multiplyConjPfS_i
.p2align 3, 0x0
_Z19kernel_multiplyConjPfS_i:
.quad _Z34__device_stub__kernel_multiplyConjPfS_i
.size _Z19kernel_multiplyConjPfS_i, 8
.type _Z11kernel_copyPfS_i,@object # @_Z11kernel_copyPfS_i
.globl _Z11kernel_copyPfS_i
.p2align 3, 0x0
_Z11kernel_copyPfS_i:
.quad _Z26__device_stub__kernel_copyPfS_i
.size _Z11kernel_copyPfS_i, 8
# Mangled-name strings used for registration.
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z20kernel_divideStable2PfS_fi"
.size .L__unnamed_1, 31
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z23kernel_multiply_complexPfS_i"
.size .L__unnamed_2, 33
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z20kernel_multiply_realPfS_i"
.size .L__unnamed_3, 30
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z19kernel_multiplyConjPfS_i"
.size .L__unnamed_4, 29
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "_Z11kernel_copyPfS_i"
.size .L__unnamed_5, 21
# Fat-binary wrapper: magic 0x48495046 ("HIPF"), version 1, pointer to the
# device code blob __hip_fatbin (emitted elsewhere).
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
# Run the module constructor at startup.
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z35__device_stub__kernel_divideStable2PfS_fi
.addrsig_sym _Z38__device_stub__kernel_multiply_complexPfS_i
.addrsig_sym _Z35__device_stub__kernel_multiply_realPfS_i
.addrsig_sym _Z34__device_stub__kernel_multiplyConjPfS_i
.addrsig_sym _Z26__device_stub__kernel_copyPfS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z20kernel_divideStable2PfS_fi
.addrsig_sym _Z23kernel_multiply_complexPfS_i
.addrsig_sym _Z20kernel_multiply_realPfS_i
.addrsig_sym _Z19kernel_multiplyConjPfS_i
.addrsig_sym _Z11kernel_copyPfS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
// Empty gfx1100 device object: no kernels, only padding (the p2alignl/fill
// value 3214868480 = 0xBF9F0000 is an s_nop-style filler — NOTE(review):
// confirm against the target ISA), a zero __hip_cuid_ byte, and the
// amdhsa metadata block declaring an empty kernel list.
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /* Enunciado:
* Block-wise multiplication of MxN (16x16) matrices in CUDA
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
cudaError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width);
const int TILE_WIDTH = 4;//Se ha establecido un tamaño de tesela de 4 hilos
__global__ void productoKernel(int *c, const int *a, const int *b, unsigned int Width)
{
// Row and column of the output element this thread computes (TILE_WIDTH = 4).
int id_fil = blockIdx.y * TILE_WIDTH + threadIdx.y;
int id_col = blockIdx.x * TILE_WIDTH + threadIdx.x;
int n = 0;
if ((id_fil < Width) && (id_col < Width)) {//Threads outside the matrix bounds must not act
// Dot product of row id_fil of 'a' with column id_col of 'b' (row-major storage).
for (int i = 0; i < Width; i++) {
n = n + (a[id_fil*Width + i] * b[i*Width + id_col]);
}
c[id_fil*Width + id_col] = n;
}
}
//Hace uso de ceil para obtener el tamaño del Grid de Bloques
int grid_calc(int Width, int Tile_Width) {
// Blocks needed per grid dimension: ceil(Width / Tile_Width), using the
// same double-precision division and rounding as the original.
double cociente = (double)Width / (double)Tile_Width;
return (int)ceil(cociente);
}
void imprimeMatriz(int *v, int m, int n) {//( m * n )
// Pretty-print an m x n row-major matrix; each cell occupies 5 characters.
int i, j, x;
int ws;//character slots left in the current cell
printf("\n");
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
ws = 5;
x = v[i*n + j];//row stride is n (bug fix: was i*m+j, wrong for non-square matrices)
if (x < 0) {//negative: one slot is taken by the "-" sign
ws--;
x = -1 * x;
}
else {//leading blank so digits line up with negative values
ws--;
printf(" ");
}
do {//one slot consumed per digit of the number
ws--;
x = x / 10;
} while (x > 0);
printf("%d", v[i*n + j]);//print the value itself
while (ws > 0) {//pad the remaining slots with blanks
printf(" ");
ws--;
}
}
printf("\n");
}
}
void imprimeMatriz(int *v, int m) {//Para matrices cuadradas ( m * m )
int i, j, x;
int ws;//numero de espacios de caracteres por casilla
printf("\n");
for (i = 0; i < m; i++) {
for (j = 0; j < m; j++) {
ws = 5;
x = v[i*m + j];
if (x < 0) {//si es negativo, se ocupa un hueco por el signo "-"
ws--;
x = -1 * x;
}
else {//para alinear los dígitos
ws--;
printf(" ");
}
do {//Se ocupa un hueco por digito del numero
ws--;
x = x / 10;
} while (x > 0);
printf("%d", v[i*m + j]);//imprimimos el numero
while (ws > 0) {//y ocupamos el resto de huecos con espacios en blanco
printf(" ");
ws--;
}
}
printf("\n");
}
}
void generaMatriz(int *v, int m, int n, int max, int min) {//( m * n )
int i, j;
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
v[i*n + j] = (rand() % (max - min)) + min;
}
}
}
void generaMatriz(int *v, int m, int max, int min) {//Para matrices cuadradas ( m * m )
int i, j;
for (i = 0; i < m; i++) {
for (j = 0; j < m; j++) {
v[i*m + j] = (rand() % (max - min)) + min;
}
}
}
int main()
{
/*const int Width = 6;//Pruebas
int a[6 * 6] = { 8, 7, 3, -4, -2, -3,
8, 9, -6, 3, -1, -4,
-6, -1, -10, -7, 8, 6,
4, -6, -6, -3, 8, 7,
-1, -1, -7, -8, -1, 9,
7, 8, 3, 7, 2, 3 };
int b[6 * 6] = { 0, 9, 9, 5, -10, 6,
-2, 3, 8, 4, 0, -9,
7, 1, -8, -9, -10, -9,
-3, -5, 7, -2, 6, 4,
7, 9, -3, -9, -9, 6,
6, 6, 4, -8, 8, -5 };
//Resultado de a * b =
// -13 80 70 91 -140 -55
// -100 45 200 165 -25 47
// 45 76 -31 -50 94 53
// 77 141 19 -72 -14 133
// 24 66 22 7 113 -17
// 16 91 158 -16 -52 -32*/
srand((unsigned int)time(0));
const int max = 10;
const int min = -10;
const int Width = 16;
int a[Width * Width] = { 0 };
generaMatriz(a, Width, max, min);
int b[Width * Width] = { 0 };
generaMatriz(b, Width, max, min);
int c[Width * Width] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = prodMatricesCuda(c, a, b, Width);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("\n\tMatriz A\n");
imprimeMatriz(a, Width);
printf("\n\tMatriz B\n");
imprimeMatriz(b, Width);
printf("\n\tResultado del producto:\n");
imprimeMatriz(c, Width);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
dim3 DimGrid(grid_calc(Width, TILE_WIDTH), grid_calc(Width, TILE_WIDTH));
dim3 DimBlock(TILE_WIDTH, TILE_WIDTH);
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, Width * Width * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, Width * Width * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, Width * Width * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, Width * Width * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, Width * Width * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
productoKernel<<<DimGrid, DimBlock>>>(dev_c, dev_a, dev_b, Width);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, Width * Width * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
} | code for sm_80
Function : _Z14productoKernelPiPKiS1_j
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R17, SR_TID.X ; /* 0x0000000000117919 */
/* 0x000e280000002100 */
/*0030*/ S2R R19, SR_CTAID.Y ; /* 0x0000000000137919 */
/* 0x000e680000002600 */
/*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R16, R0, 0x4, R17 ; /* 0x0000000400107824 */
/* 0x001fca00078e0211 */
/*0060*/ ISETP.GE.U32.AND P0, PT, R16, c[0x0][0x178], PT ; /* 0x00005e0010007a0c */
/* 0x000fe40003f06070 */
/*0070*/ LEA R19, R19, R2, 0x2 ; /* 0x0000000213137211 */
/* 0x002fc800078e10ff */
/*0080*/ ISETP.GE.U32.OR P0, PT, R19, c[0x0][0x178], P0 ; /* 0x00005e0013007a0c */
/* 0x000fda0000706470 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IMAD.MOV.U32 R21, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff157624 */
/* 0x000fe200078e00ff */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ HFMA2.MMA R20, -RZ, RZ, 0, 0 ; /* 0x00000000ff147435 */
/* 0x000fe200000001ff */
/*00d0*/ IMAD.MOV.U32 R27, RZ, RZ, RZ ; /* 0x000000ffff1b7224 */
/* 0x000fe400078e00ff */
/*00e0*/ IADD3 R2, R21.reuse, -0x1, RZ ; /* 0xffffffff15027810 */
/* 0x040fe40007ffe0ff */
/*00f0*/ LOP3.LUT R18, R21, 0x3, RZ, 0xc0, !PT ; /* 0x0000000315127812 */
/* 0x000fe400078ec0ff */
/*0100*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f26070 */
/*0110*/ ISETP.NE.AND P0, PT, R18, RZ, PT ; /* 0x000000ff1200720c */
/* 0x000fd60003f05270 */
/*0120*/ @!P1 BRA 0x3c0 ; /* 0x0000029000009947 */
/* 0x000fea0003800000 */
/*0130*/ IADD3 R3, R17, c[0x0][0x178], RZ ; /* 0x00005e0011037a10 */
/* 0x000fe20007ffe0ff */
/*0140*/ IMAD R25, R19, R21, 0x3 ; /* 0x0000000313197424 */
/* 0x000fe200078e0215 */
/*0150*/ LEA R22, R21.reuse, R16.reuse, 0x1 ; /* 0x0000001015167211 */
/* 0x0c0fe200078e08ff */
/*0160*/ IMAD R24, R21, 0x3, R16 ; /* 0x0000000315187824 */
/* 0x000fe200078e0210 */
/*0170*/ IADD3 R23, R18, -c[0x0][0x178], RZ ; /* 0x80005e0012177a10 */
/* 0x000fe20007ffe0ff */
/*0180*/ IMAD.MOV.U32 R20, RZ, RZ, RZ ; /* 0x000000ffff147224 */
/* 0x000fe200078e00ff */
/*0190*/ MOV R26, R16 ; /* 0x00000010001a7202 */
/* 0x000fe20000000f00 */
/*01a0*/ IMAD R28, R0, 0x4, R3 ; /* 0x00000004001c7824 */
/* 0x000fe400078e0203 */
/*01b0*/ MOV R13, 0x4 ; /* 0x00000004000d7802 */
/* 0x000fe40000000f00 */
/*01c0*/ IADD3 R2, R25.reuse, -0x3, RZ ; /* 0xfffffffd19027810 */
/* 0x040fe40007ffe0ff */
/*01d0*/ IADD3 R4, R25.reuse, -0x2, RZ ; /* 0xfffffffe19047810 */
/* 0x040fe20007ffe0ff */
/*01e0*/ IMAD.WIDE.U32 R14, R26, R13, c[0x0][0x170] ; /* 0x00005c001a0e7625 */
/* 0x000fe200078e000d */
/*01f0*/ IADD3 R8, R25, -0x1, RZ ; /* 0xffffffff19087810 */
/* 0x000fc60007ffe0ff */
/*0200*/ IMAD.WIDE.U32 R2, R2, R13.reuse, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x080fe400078e000d */
/*0210*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ea4000c1e1900 */
/*0220*/ IMAD.WIDE.U32 R4, R4, R13.reuse, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x080fe400078e000d */
/*0230*/ LDG.E R29, [R2.64] ; /* 0x00000004021d7981 */
/* 0x0000a4000c1e1900 */
/*0240*/ IMAD.WIDE.U32 R6, R28, R13.reuse, c[0x0][0x170] ; /* 0x00005c001c067625 */
/* 0x080fe400078e000d */
/*0250*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ee8000c1e1900 */
/*0260*/ LDG.E R7, [R6.64] ; /* 0x0000000406077981 */
/* 0x000ee2000c1e1900 */
/*0270*/ IMAD.WIDE.U32 R8, R8, R13, c[0x0][0x168] ; /* 0x00005a0008087625 */
/* 0x000fc800078e000d */
/*0280*/ IMAD.WIDE.U32 R2, R22, R13.reuse, c[0x0][0x170] ; /* 0x00005c0016027625 */
/* 0x081fe400078e000d */
/*0290*/ LDG.E R8, [R8.64] ; /* 0x0000000408087981 */
/* 0x000f24000c1e1900 */
/*02a0*/ IMAD.WIDE.U32 R10, R25, R13.reuse, c[0x0][0x168] ; /* 0x00005a00190a7625 */
/* 0x080fe400078e000d */
/*02b0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000f24000c1e1900 */
/*02c0*/ IMAD.WIDE.U32 R12, R24, R13, c[0x0][0x170] ; /* 0x00005c00180c7625 */
/* 0x000fe400078e000d */
/*02d0*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x000f68000c1e1900 */
/*02e0*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x000f62000c1e1900 */
/*02f0*/ IADD3 R20, R20, 0x4, RZ ; /* 0x0000000414147810 */
/* 0x000fe20007ffe0ff */
/*0300*/ IMAD R22, R21.reuse, 0x4, R22 ; /* 0x0000000415167824 */
/* 0x040fe200078e0216 */
/*0310*/ LEA R28, R21, R28, 0x2 ; /* 0x0000001c151c7211 */
/* 0x000fc400078e10ff */
/*0320*/ LEA R26, R21, R26, 0x2 ; /* 0x0000001a151a7211 */
/* 0x000fe400078e10ff */
/*0330*/ IADD3 R25, R25, 0x4, RZ ; /* 0x0000000419197810 */
/* 0x000fe40007ffe0ff */
/*0340*/ LEA R24, R21, R24, 0x2 ; /* 0x0000001815187211 */
/* 0x000fe200078e10ff */
/*0350*/ IMAD R14, R14, R29, R27 ; /* 0x0000001d0e0e7224 */
/* 0x004fc800078e021b */
/*0360*/ IMAD R5, R7, R4, R14 ; /* 0x0000000407057224 */
/* 0x008fe400078e020e */
/*0370*/ IMAD.IADD R4, R23, 0x1, R20 ; /* 0x0000000117047824 */
/* 0x000fca00078e0214 */
/*0380*/ ISETP.NE.AND P1, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003f25270 */
/*0390*/ IMAD R5, R2, R8, R5 ; /* 0x0000000802057224 */
/* 0x010fc800078e0205 */
/*03a0*/ IMAD R27, R12, R10, R5 ; /* 0x0000000a0c1b7224 */
/* 0x020fd000078e0205 */
/*03b0*/ @P1 BRA 0x1b0 ; /* 0xfffffdf000001947 */
/* 0x000fea000383ffff */
/*03c0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe400078e00ff */
/*03d0*/ IMAD R2, R19, c[0x0][0x178], R16 ; /* 0x00005e0013027a24 */
/* 0x000fc800078e0210 */
/*03e0*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fe200078e0003 */
/*03f0*/ @!P0 BRA 0x4e0 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0400*/ IMAD R17, R20, c[0x0][0x178], R17 ; /* 0x00005e0014117a24 */
/* 0x000fe400078e0211 */
/*0410*/ IMAD R19, R19, c[0x0][0x178], R20 ; /* 0x00005e0013137a24 */
/* 0x000fc600078e0214 */
/*0420*/ LEA R0, R0, R17, 0x2 ; /* 0x0000001100007211 */
/* 0x000fe400078e10ff */
/*0430*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */
/* 0x000fd400000001ff */
/*0440*/ IMAD.WIDE.U32 R4, R19, R6, c[0x0][0x168] ; /* 0x00005a0013047625 */
/* 0x000fc800078e0006 */
/*0450*/ IMAD.WIDE.U32 R6, R0, R6, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fe400078e0006 */
/*0460*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0470*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ea2000c1e1900 */
/*0480*/ IADD3 R18, R18, -0x1, RZ ; /* 0xffffffff12127810 */
/* 0x000fe40007ffe0ff */
/*0490*/ IADD3 R19, R19, 0x1, RZ ; /* 0x0000000113137810 */
/* 0x000fe40007ffe0ff */
/*04a0*/ ISETP.NE.AND P0, PT, R18, RZ, PT ; /* 0x000000ff1200720c */
/* 0x000fc40003f05270 */
/*04b0*/ IADD3 R0, R0, c[0x0][0x178], RZ ; /* 0x00005e0000007a10 */
/* 0x000fe20007ffe0ff */
/*04c0*/ IMAD R27, R6, R4, R27 ; /* 0x00000004061b7224 */
/* 0x004fd400078e021b */
/*04d0*/ @P0 BRA 0x430 ; /* 0xffffff5000000947 */
/* 0x000fea000383ffff */
/*04e0*/ STG.E [R2.64], R27 ; /* 0x0000001b02007986 */
/* 0x000fe2000c101904 */
/*04f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0500*/ BRA 0x500; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* Enunciado:
* Multiplicacion de Matrices MxN (16x16) por Bloques en CUDA
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
cudaError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width);
const int TILE_WIDTH = 4;//Se ha establecido un tamaño de tesela de 4 hilos
__global__ void productoKernel(int *c, const int *a, const int *b, unsigned int Width)
{
// Row and column of the output element this thread computes (TILE_WIDTH = 4).
int id_fil = blockIdx.y * TILE_WIDTH + threadIdx.y;
int id_col = blockIdx.x * TILE_WIDTH + threadIdx.x;
int n = 0;
if ((id_fil < Width) && (id_col < Width)) {//Threads outside the matrix bounds must not act
// Dot product of row id_fil of 'a' with column id_col of 'b' (row-major storage).
for (int i = 0; i < Width; i++) {
n = n + (a[id_fil*Width + i] * b[i*Width + id_col]);
}
c[id_fil*Width + id_col] = n;
}
}
//Hace uso de ceil para obtener el tamaño del Grid de Bloques
int grid_calc(int Width, int Tile_Width) {
// Blocks needed per grid dimension: ceil(Width / Tile_Width), computed in
// double precision exactly as before so rounding behavior is unchanged.
double cociente = (double)Width / (double)Tile_Width;
return (int)ceil(cociente);
}
void imprimeMatriz(int *v, int m, int n) {//( m * n )
// Pretty-print an m x n row-major matrix; each cell occupies 5 characters.
int i, j, x;
int ws;//character slots left in the current cell
printf("\n");
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
ws = 5;
x = v[i*n + j];//row stride is n (bug fix: was i*m+j, wrong for non-square matrices)
if (x < 0) {//negative: one slot is taken by the "-" sign
ws--;
x = -1 * x;
}
else {//leading blank so digits line up with negative values
ws--;
printf(" ");
}
do {//one slot consumed per digit of the number
ws--;
x = x / 10;
} while (x > 0);
printf("%d", v[i*n + j]);//print the value itself
while (ws > 0) {//pad the remaining slots with blanks
printf(" ");
ws--;
}
}
printf("\n");
}
}
void imprimeMatriz(int *v, int m) {//For square matrices ( m * m )
int i, j, x;
int ws;//character slots per cell (fixed width of 5)
printf("\n");
for (i = 0; i < m; i++) {
for (j = 0; j < m; j++) {
ws = 5;
x = v[i*m + j];
if (x < 0) {//negative: one slot is taken by the "-" sign
ws--;
x = -1 * x;
}
else {//leading blank so digits line up with negative values
ws--;
printf(" ");
}
do {//one slot consumed per digit of the number
ws--;
x = x / 10;
} while (x > 0);
printf("%d", v[i*m + j]);//print the value itself
while (ws > 0) {//pad the remaining slots with blanks
printf(" ");
ws--;
}
}
printf("\n");
}
}
void generaMatriz(int *v, int m, int n, int max, int min) {//( m * n )
// Fill the m x n row-major matrix with pseudo-random values in [min, max).
int span = max - min;
for (int fila = 0; fila < m; fila++) {
for (int col = 0; col < n; col++) {
v[fila*n + col] = (rand() % span) + min;
}
}
}
void generaMatriz(int *v, int m, int max, int min) {//For square matrices ( m * m )
// Fill the m x m row-major matrix with pseudo-random values in [min, max).
int span = max - min;
for (int fila = 0; fila < m; fila++) {
for (int col = 0; col < m; col++) {
v[fila*m + col] = (rand() % span) + min;
}
}
}
int main()
{
/*const int Width = 6;//Reference test data, kept for manual verification
int a[6 * 6] = { 8, 7, 3, -4, -2, -3,
8, 9, -6, 3, -1, -4,
-6, -1, -10, -7, 8, 6,
4, -6, -6, -3, 8, 7,
-1, -1, -7, -8, -1, 9,
7, 8, 3, 7, 2, 3 };
int b[6 * 6] = { 0, 9, 9, 5, -10, 6,
-2, 3, 8, 4, 0, -9,
7, 1, -8, -9, -10, -9,
-3, -5, 7, -2, 6, 4,
7, 9, -3, -9, -9, 6,
6, 6, 4, -8, 8, -5 };
//Expected result of a * b =
// -13 80 70 91 -140 -55
// -100 45 200 165 -25 47
// 45 76 -31 -50 94 53
// 77 141 19 -72 -14 133
// 24 66 22 7 113 -17
// 16 91 158 -16 -52 -32*/
srand((unsigned int)time(0));// seed the RNG so each run gets different matrices
const int max = 10;// random values are drawn from [min, max)
const int min = -10;
const int Width = 16;// square Width x Width matrices
int a[Width * Width] = { 0 };
generaMatriz(a, Width, max, min);
int b[Width * Width] = { 0 };
generaMatriz(b, Width, max, min);
int c[Width * Width] = { 0 };
// Multiply the matrices in parallel on the GPU.
cudaError_t cudaStatus = prodMatricesCuda(c, a, b, Width);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("\n\tMatriz A\n");
imprimeMatriz(a, Width);
printf("\n\tMatriz B\n");
imprimeMatriz(b, Width);
printf("\n\tResultado del producto:\n");
imprimeMatriz(c, Width);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width)
{
// Multiplies the Width x Width matrices a and b on the GPU, storing a*b in c.
// Returns cudaSuccess, or the first CUDA error encountered; every exit path
// goes through the Error label so device allocations are always released
// (cudaFree on a still-null pointer is a harmless no-op).
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
// ceil(Width / TILE_WIDTH) blocks per grid dimension so a partial tile still runs.
dim3 DimGrid(grid_calc(Width, TILE_WIDTH), grid_calc(Width, TILE_WIDTH));
dim3 DimBlock(TILE_WIDTH, TILE_WIDTH);
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for the two inputs and the output (Width*Width ints each).
cudaStatus = cudaMalloc((void**)&dev_c, Width * Width * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, Width * Width * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, Width * Width * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy the input matrices from host memory to the GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, Width * Width * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, Width * Width * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch the kernel: one thread per output element.
productoKernel<<<DimGrid, DimBlock>>>(dev_c, dev_a, dev_b, Width);
// Check for any errors launching the kernel.
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy the result matrix from the GPU buffer back to host memory.
cudaStatus = cudaMemcpy(c, dev_c, Width * Width * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
.text
#APP
#NO_APP
# Compiler-generated atexit helper: unregisters the module's CUDA fat binary.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2066:
.cfi_startproc
endbr64
subq $8, %rsp                           # realign rsp to 16 bytes before the call
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi   # arg0 = handle saved at module registration
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2066:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9grid_calcii
.type _Z9grid_calcii, @function
_Z9grid_calcii:
.LFB2057:
.cfi_startproc
endbr64
pxor %xmm0, %xmm0
cvtsi2sdl %edi, %xmm0
pxor %xmm1, %xmm1
cvtsi2sdl %esi, %xmm1
divsd %xmm1, %xmm0
movapd %xmm0, %xmm3
movsd .LC3(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC0(%rip), %xmm4
ucomisd %xmm1, %xmm4
jbe .L4
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
cmpnlesd %xmm1, %xmm3
movsd .LC2(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm1, %xmm3
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm3
.L4:
cvttsd2sil %xmm3, %eax
ret
.cfi_endproc
.LFE2057:
.size _Z9grid_calcii, .-_Z9grid_calcii
.section .rodata.str1.1,"aMS",@progbits,1
.LC4:
.string "\n"
.LC5:
.string " "
.LC6:
.string "%d"
.text
.globl _Z13imprimeMatrizPiii
.type _Z13imprimeMatrizPiii, @function
_Z13imprimeMatrizPiii:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 16(%rsp)
movl %esi, %ebx
movl %edx, %r15d
movl %edx, 12(%rsp)
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %ebx, %ebx
jle .L5
movl $0, %r14d
movl $0, %ebp
movslq %r15d, %rax
movq %rax, 24(%rsp)
leaq .LC5(%rip), %r12
jmp .L7
.L8:
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L9:
movl $4, %ebx
.L10:
subl $1, %ebx
movl %ebp, %edx
movslq %ebp, %rax
imulq $1717986919, %rax, %rax
sarq $34, %rax
movl %ebp, %ecx
sarl $31, %ecx
subl %ecx, %eax
movl %eax, %ebp
cmpl $9, %edx
jg .L10
movl (%r14), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %ebx, %ebx
jle .L11
.L12:
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
subl $1, %ebx
jne .L12
.L11:
addq $4, %r13
cmpq %r15, %r13
je .L20
.L13:
movq %r13, %r14
movl 0(%r13), %ebp
testl %ebp, %ebp
jns .L8
negl %ebp
jmp .L9
.L20:
movl (%rsp), %ebp
movl 4(%rsp), %r14d
movl 8(%rsp), %ebx
.L15:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebp
addl %ebx, %r14d
cmpl %ebp, %ebx
je .L5
.L7:
cmpl $0, 12(%rsp)
jle .L15
movslq %r14d, %rax
movq 16(%rsp), %rdi
leaq (%rdi,%rax,4), %r13
movq 24(%rsp), %rsi
addq %rsi, %rax
leaq (%rdi,%rax,4), %r15
movl %ebp, (%rsp)
movl %r14d, 4(%rsp)
movl %ebx, 8(%rsp)
jmp .L13
.L5:
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z13imprimeMatrizPiii, .-_Z13imprimeMatrizPiii
.globl _Z13imprimeMatrizPii
.type _Z13imprimeMatrizPii, @function
_Z13imprimeMatrizPii:
.LFB2059:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, %r15
movl %esi, %ebx
movl %esi, 20(%rsp)
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %ebx, %ebx
jle .L21
movl %ebx, %eax
cltq
leaq 0(,%rax,4), %rdi
movq %rdi, 8(%rsp)
addq %rdi, %r15
negq %rax
salq $2, %rax
movq %rax, 24(%rsp)
movl $0, 16(%rsp)
leaq .LC5(%rip), %r12
jmp .L23
.L24:
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L25:
movl $4, %ebx
.L26:
subl $1, %ebx
movl %ebp, %edx
movslq %ebp, %rax
imulq $1717986919, %rax, %rax
sarq $34, %rax
movl %ebp, %ecx
sarl $31, %ecx
subl %ecx, %eax
movl %eax, %ebp
cmpl $9, %edx
jg .L26
movl (%r14), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %ebx, %ebx
jle .L27
.L28:
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
subl $1, %ebx
jne .L28
.L27:
addq $4, %r13
cmpq %r15, %r13
je .L34
.L29:
movq %r13, %r14
movl 0(%r13), %ebp
testl %ebp, %ebp
jns .L24
negl %ebp
jmp .L25
.L34:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, 16(%rsp)
movl 16(%rsp), %eax
movq 8(%rsp), %rdi
addq %rdi, %r15
cmpl %eax, 20(%rsp)
je .L21
.L23:
movq 24(%rsp), %rax
leaq (%r15,%rax), %r13
jmp .L29
.L21:
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z13imprimeMatrizPii, .-_Z13imprimeMatrizPii
.globl _Z12generaMatrizPiiiii
.type _Z12generaMatrizPiiiii, @function
_Z12generaMatrizPiiiii:
.LFB2060:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 8(%rsp)
movl %esi, 16(%rsp)
movl %edx, 4(%rsp)
movl %ecx, 20(%rsp)
testl %esi, %esi
jle .L35
movl %r8d, %r12d
movl $0, %r15d
movl $0, %r14d
movslq %edx, %rax
movq %rax, 24(%rsp)
jmp .L37
.L39:
movslq %r15d, %rax
movq 8(%rsp), %rcx
leaq (%rcx,%rax,4), %rbx
movq 24(%rsp), %rsi
addq %rsi, %rax
leaq (%rcx,%rax,4), %r13
movl 20(%rsp), %ebp
subl %r12d, %ebp
.L38:
call rand@PLT
cltd
idivl %ebp
addl %r12d, %edx
movl %edx, (%rbx)
addq $4, %rbx
cmpq %r13, %rbx
jne .L38
.L40:
addl $1, %r14d
movl 4(%rsp), %eax
addl %eax, %r15d
cmpl %r14d, 16(%rsp)
je .L35
.L37:
cmpl $0, 4(%rsp)
jg .L39
jmp .L40
.L35:
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z12generaMatrizPiiiii, .-_Z12generaMatrizPiiiii
.globl _Z12generaMatrizPiiii
.type _Z12generaMatrizPiiii, @function
_Z12generaMatrizPiiii:
.LFB2061:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movl %esi, 4(%rsp)
testl %esi, %esi
jle .L43
movl %ecx, %r13d
movslq %esi, %rax
leaq 0(,%rax,4), %r15
leaq (%rdi,%r15), %r12
negq %rax
salq $2, %rax
movq %rax, 8(%rsp)
movl $0, %r14d
subl %ecx, %edx
movl %edx, %ebp
.L45:
movq 8(%rsp), %rax
leaq (%r12,%rax), %rbx
.L46:
call rand@PLT
cltd
idivl %ebp
addl %r13d, %edx
movl %edx, (%rbx)
addq $4, %rbx
cmpq %r12, %rbx
jne .L46
addl $1, %r14d
addq %r15, %r12
cmpl %r14d, 4(%rsp)
jne .L45
.L43:
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _Z12generaMatrizPiiii, .-_Z12generaMatrizPiiii
.globl _Z41__device_stub__Z14productoKernelPiPKiS1_jPiPKiS1_j
.type _Z41__device_stub__Z14productoKernelPiPKiS1_jPiPKiS1_j, @function
_Z41__device_stub__Z14productoKernelPiPKiS1_jPiPKiS1_j:
.LFB2088:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L53
.L49:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L54
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L53:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14productoKernelPiPKiS1_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L49
.L54:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2088:
.size _Z41__device_stub__Z14productoKernelPiPKiS1_jPiPKiS1_j, .-_Z41__device_stub__Z14productoKernelPiPKiS1_jPiPKiS1_j
# Host-side trampoline: taking the address of (or calling) the kernel symbol
# from host code routes through the device-launch stub above.
.globl _Z14productoKernelPiPKiS1_j
.type _Z14productoKernelPiPKiS1_j, @function
_Z14productoKernelPiPKiS1_j:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp                           # keep rsp 16-byte aligned at the call
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z14productoKernelPiPKiS1_jPiPKiS1_j   # forward all args unchanged
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _Z14productoKernelPiPKiS1_j, .-_Z14productoKernelPiPKiS1_j
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC7:
.string "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"
.section .rodata.str1.1
.LC8:
.string "cudaMalloc failed!"
.LC9:
.string "cudaMemcpy failed!"
.LC10:
.string "addKernel launch failed: %s\n"
.section .rodata.str1.8
.align 8
.LC11:
.string "cudaDeviceSynchronize returned error code %d after launching addKernel!\n"
.text
.globl _Z16prodMatricesCudaPiPKiS1_j
.type _Z16prodMatricesCudaPiPKiS1_j, @function
_Z16prodMatricesCudaPiPKiS1_j:
.LFB2063:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %rdi, %r14
movq %rsi, %r12
movq %rdx, %r13
movl %ecx, %ebp
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movq $0, 8(%rsp)
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movl $4, %esi
movl %ecx, %edi
call _Z9grid_calcii
movl %eax, 32(%rsp)
movl %eax, 36(%rsp)
movl $1, 40(%rsp)
movl $4, 44(%rsp)
movl $4, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %edi
call cudaSetDevice@PLT
testl %eax, %eax
jne .L70
movl %ebp, %r15d
imull %ebp, %r15d
salq $2, %r15
leaq 24(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L71
leaq 8(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L72
leaq 16(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L73
movl $1, %ecx
movq %r15, %rdx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L74
movl $1, %ecx
movq %r15, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L75
movl 52(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movq 32(%rsp), %rdi
movl 40(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L76
.L65:
call cudaGetLastError@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L77
call cudaDeviceSynchronize@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L78
movl $2, %ecx
movq %r15, %rdx
movq 24(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
testl %eax, %eax
je .L59
leaq .LC9(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L59
.L70:
movl %eax, %ebx
leaq .LC7(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
.L59:
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L79
movl %ebx, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L71:
.cfi_restore_state
leaq .LC8(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L59
.L72:
leaq .LC8(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L59
.L73:
leaq .LC8(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L59
.L74:
leaq .LC9(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L59
.L75:
leaq .LC9(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L59
.L76:
movl %ebp, %ecx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z41__device_stub__Z14productoKernelPiPKiS1_jPiPKiS1_j
jmp .L65
.L77:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC10(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L59
.L78:
movl %eax, %ecx
leaq .LC11(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L59
.L79:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2063:
.size _Z16prodMatricesCudaPiPKiS1_j, .-_Z16prodMatricesCudaPiPKiS1_j
.section .rodata.str1.1
.LC12:
.string "addWithCuda failed!"
.LC13:
.string "\n\tMatriz A\n"
.LC14:
.string "\n\tMatriz B\n"
.LC15:
.string "\n\tResultado del producto:\n"
.LC16:
.string "cudaDeviceReset failed!"
.text
.globl main
.type main, @function
main:
.LFB2062:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $3088, %rsp
.cfi_def_cfa_offset 3120
movq %fs:40, %rax
movq %rax, 3080(%rsp)
xorl %eax, %eax
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
movq %rsp, %rdi
movl $0, %ebx
movl $128, %ecx
movq %rbx, %rax
rep stosq
movq %rsp, %rbp
movl $-10, %ecx
movl $10, %edx
movl $16, %esi
movq %rbp, %rdi
call _Z12generaMatrizPiiii
leaq 1024(%rsp), %rdi
movl $128, %ecx
movq %rbx, %rax
rep stosq
leaq 1024(%rsp), %r12
movl $-10, %ecx
movl $10, %edx
movl $16, %esi
movq %r12, %rdi
call _Z12generaMatrizPiiii
leaq 2048(%rsp), %rdi
movl $128, %ecx
movq %rbx, %rax
rep stosq
leaq 2048(%rsp), %rdi
movl $16, %ecx
movq %r12, %rdx
movq %rbp, %rsi
call _Z16prodMatricesCudaPiPKiS1_j
testl %eax, %eax
jne .L86
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rsp, %rdi
movl $16, %esi
call _Z13imprimeMatrizPii
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 1024(%rsp), %rdi
movl $16, %esi
call _Z13imprimeMatrizPii
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 2048(%rsp), %rdi
movl $16, %esi
call _Z13imprimeMatrizPii
call cudaDeviceReset@PLT
movl %eax, %edx
movl $0, %eax
testl %edx, %edx
jne .L87
.L80:
movq 3080(%rsp), %rdx
subq %fs:40, %rdx
jne .L88
addq $3088, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L86:
.cfi_restore_state
leaq .LC12(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %eax
jmp .L80
.L87:
leaq .LC16(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
call __fprintf_chk@PLT
movl $1, %eax
jmp .L80
.L88:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2062:
.size main, .-main
.section .rodata.str1.1
.LC17:
.string "_Z14productoKernelPiPKiS1_j"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2091:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC17(%rip), %rdx
movq %rdx, %rcx
leaq _Z14productoKernelPiPKiS1_j(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2091:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1127219200
.align 8
.LC2:
.long 0
.long 1072693248
.align 8
.LC3:
.long -1
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* Enunciado:
* Multiplicacion de Matrices MxN (16x16) por Bloques en CUDA
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
cudaError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width);
const int TILE_WIDTH = 4;//Se ha establecido un tamaño de tesela de 4 hilos
__global__ void productoKernel(int *c, const int *a, const int *b, unsigned int Width)
{
int id_fil = blockIdx.y * TILE_WIDTH + threadIdx.y;
int id_col = blockIdx.x * TILE_WIDTH + threadIdx.x;
int n = 0;
if ((id_fil < Width) && (id_col < Width)) {//Si el hilo está fuera de los valores, no debe actuar
for (int i = 0; i < Width; i++) {
n = n + (a[id_fil*Width + i] * b[i*Width + id_col]);
}
c[id_fil*Width + id_col] = n;
}
}
//Hace uso de ceil para obtener el tamaño del Grid de Bloques
int grid_calc(int Width, int Tile_Width) {
double x = Width;
double y = Tile_Width;
return (int)(ceil(x / y));//redondea hacia arriba la división
}
void imprimeMatriz(int *v, int m, int n) {//( m * n )
int i, j, x;
int ws;//numero de espacios de caracteres por casilla
printf("\n");
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
ws = 5;
x = v[i*m + j];
if (x < 0) {//si es negativo, se ocupa un hueco por el signo "-"
ws--;
x = -1 * x;
}
else {//para alinear los dígitos
ws--;
printf(" ");
}
do {//Se ocupa un hueco por digito del numero
ws--;
x = x / 10;
} while (x > 0);
printf("%d", v[i*m + j]);//imprimimos el numero
while (ws > 0) {//y ocupamos el resto de huecos con espacios en blanco
printf(" ");
ws--;
}
}
printf("\n");
}
}
void imprimeMatriz(int *v, int m) {//Para matrices cuadradas ( m * m )
int i, j, x;
int ws;//numero de espacios de caracteres por casilla
printf("\n");
for (i = 0; i < m; i++) {
for (j = 0; j < m; j++) {
ws = 5;
x = v[i*m + j];
if (x < 0) {//si es negativo, se ocupa un hueco por el signo "-"
ws--;
x = -1 * x;
}
else {//para alinear los dígitos
ws--;
printf(" ");
}
do {//Se ocupa un hueco por digito del numero
ws--;
x = x / 10;
} while (x > 0);
printf("%d", v[i*m + j]);//imprimimos el numero
while (ws > 0) {//y ocupamos el resto de huecos con espacios en blanco
printf(" ");
ws--;
}
}
printf("\n");
}
}
void generaMatriz(int *v, int m, int n, int max, int min) {//( m * n )
int i, j;
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
v[i*n + j] = (rand() % (max - min)) + min;
}
}
}
void generaMatriz(int *v, int m, int max, int min) {//Para matrices cuadradas ( m * m )
int i, j;
for (i = 0; i < m; i++) {
for (j = 0; j < m; j++) {
v[i*m + j] = (rand() % (max - min)) + min;
}
}
}
int main()
{
/*const int Width = 6;//Pruebas
int a[6 * 6] = { 8, 7, 3, -4, -2, -3,
8, 9, -6, 3, -1, -4,
-6, -1, -10, -7, 8, 6,
4, -6, -6, -3, 8, 7,
-1, -1, -7, -8, -1, 9,
7, 8, 3, 7, 2, 3 };
int b[6 * 6] = { 0, 9, 9, 5, -10, 6,
-2, 3, 8, 4, 0, -9,
7, 1, -8, -9, -10, -9,
-3, -5, 7, -2, 6, 4,
7, 9, -3, -9, -9, 6,
6, 6, 4, -8, 8, -5 };
//Resultado de a * b =
// -13 80 70 91 -140 -55
// -100 45 200 165 -25 47
// 45 76 -31 -50 94 53
// 77 141 19 -72 -14 133
// 24 66 22 7 113 -17
// 16 91 158 -16 -52 -32*/
srand((unsigned int)time(0));
const int max = 10;
const int min = -10;
const int Width = 16;
int a[Width * Width] = { 0 };
generaMatriz(a, Width, max, min);
int b[Width * Width] = { 0 };
generaMatriz(b, Width, max, min);
int c[Width * Width] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = prodMatricesCuda(c, a, b, Width);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("\n\tMatriz A\n");
imprimeMatriz(a, Width);
printf("\n\tMatriz B\n");
imprimeMatriz(b, Width);
printf("\n\tResultado del producto:\n");
imprimeMatriz(c, Width);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
dim3 DimGrid(grid_calc(Width, TILE_WIDTH), grid_calc(Width, TILE_WIDTH));
dim3 DimBlock(TILE_WIDTH, TILE_WIDTH);
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, Width * Width * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, Width * Width * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, Width * Width * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, Width * Width * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, Width * Width * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
productoKernel<<<DimGrid, DimBlock>>>(dev_c, dev_a, dev_b, Width);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, Width * Width * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
} | /* Enunciado:
* Multiplicacion de Matrices MxN (16x16) por Bloques en CUDA
*/
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
hipError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width);
const int TILE_WIDTH = 4;//Se ha establecido un tamaño de tesela de 4 hilos
__global__ void productoKernel(int *c, const int *a, const int *b, unsigned int Width)
{
int id_fil = blockIdx.y * TILE_WIDTH + threadIdx.y;
int id_col = blockIdx.x * TILE_WIDTH + threadIdx.x;
int n = 0;
if ((id_fil < Width) && (id_col < Width)) {//Si el hilo está fuera de los valores, no debe actuar
for (int i = 0; i < Width; i++) {
n = n + (a[id_fil*Width + i] * b[i*Width + id_col]);
}
c[id_fil*Width + id_col] = n;
}
}
//Hace uso de ceil para obtener el tamaño del Grid de Bloques
int grid_calc(int Width, int Tile_Width) {
double x = Width;
double y = Tile_Width;
return (int)(ceil(x / y));//redondea hacia arriba la división
}
void imprimeMatriz(int *v, int m, int n) {//( m * n )
int i, j, x;
int ws;//numero de espacios de caracteres por casilla
printf("\n");
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
ws = 5;
x = v[i*m + j];
if (x < 0) {//si es negativo, se ocupa un hueco por el signo "-"
ws--;
x = -1 * x;
}
else {//para alinear los dígitos
ws--;
printf(" ");
}
do {//Se ocupa un hueco por digito del numero
ws--;
x = x / 10;
} while (x > 0);
printf("%d", v[i*m + j]);//imprimimos el numero
while (ws > 0) {//y ocupamos el resto de huecos con espacios en blanco
printf(" ");
ws--;
}
}
printf("\n");
}
}
void imprimeMatriz(int *v, int m) {//Para matrices cuadradas ( m * m )
int i, j, x;
int ws;//numero de espacios de caracteres por casilla
printf("\n");
for (i = 0; i < m; i++) {
for (j = 0; j < m; j++) {
ws = 5;
x = v[i*m + j];
if (x < 0) {//si es negativo, se ocupa un hueco por el signo "-"
ws--;
x = -1 * x;
}
else {//para alinear los dígitos
ws--;
printf(" ");
}
do {//Se ocupa un hueco por digito del numero
ws--;
x = x / 10;
} while (x > 0);
printf("%d", v[i*m + j]);//imprimimos el numero
while (ws > 0) {//y ocupamos el resto de huecos con espacios en blanco
printf(" ");
ws--;
}
}
printf("\n");
}
}
void generaMatriz(int *v, int m, int n, int max, int min) {//( m * n )
int i, j;
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
v[i*n + j] = (rand() % (max - min)) + min;
}
}
}
void generaMatriz(int *v, int m, int max, int min) {//Para matrices cuadradas ( m * m )
int i, j;
for (i = 0; i < m; i++) {
for (j = 0; j < m; j++) {
v[i*m + j] = (rand() % (max - min)) + min;
}
}
}
int main()
{
/*const int Width = 6;//Pruebas
int a[6 * 6] = { 8, 7, 3, -4, -2, -3,
8, 9, -6, 3, -1, -4,
-6, -1, -10, -7, 8, 6,
4, -6, -6, -3, 8, 7,
-1, -1, -7, -8, -1, 9,
7, 8, 3, 7, 2, 3 };
int b[6 * 6] = { 0, 9, 9, 5, -10, 6,
-2, 3, 8, 4, 0, -9,
7, 1, -8, -9, -10, -9,
-3, -5, 7, -2, 6, 4,
7, 9, -3, -9, -9, 6,
6, 6, 4, -8, 8, -5 };
//Resultado de a * b =
// -13 80 70 91 -140 -55
// -100 45 200 165 -25 47
// 45 76 -31 -50 94 53
// 77 141 19 -72 -14 133
// 24 66 22 7 113 -17
// 16 91 158 -16 -52 -32*/
srand((unsigned int)time(0));
const int max = 10;
const int min = -10;
const int Width = 16;
int a[Width * Width] = { 0 };
generaMatriz(a, Width, max, min);
int b[Width * Width] = { 0 };
generaMatriz(b, Width, max, min);
int c[Width * Width] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = prodMatricesCuda(c, a, b, Width);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("\n\tMatriz A\n");
imprimeMatriz(a, Width);
printf("\n\tMatriz B\n");
imprimeMatriz(b, Width);
printf("\n\tResultado del producto:\n");
imprimeMatriz(c, Width);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
dim3 DimGrid(grid_calc(Width, TILE_WIDTH), grid_calc(Width, TILE_WIDTH));
dim3 DimBlock(TILE_WIDTH, TILE_WIDTH);
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, Width * Width * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, Width * Width * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, Width * Width * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, Width * Width * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, Width * Width * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
productoKernel<<<DimGrid, DimBlock>>>(dev_c, dev_a, dev_b, Width);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, Width * Width * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /* Enunciado:
* Multiplicacion de Matrices MxN (16x16) por Bloques en CUDA
*/
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
hipError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width);
const int TILE_WIDTH = 4;//Se ha establecido un tamaño de tesela de 4 hilos
__global__ void productoKernel(int *c, const int *a, const int *b, unsigned int Width)
{
int id_fil = blockIdx.y * TILE_WIDTH + threadIdx.y;
int id_col = blockIdx.x * TILE_WIDTH + threadIdx.x;
int n = 0;
if ((id_fil < Width) && (id_col < Width)) {//Si el hilo está fuera de los valores, no debe actuar
for (int i = 0; i < Width; i++) {
n = n + (a[id_fil*Width + i] * b[i*Width + id_col]);
}
c[id_fil*Width + id_col] = n;
}
}
//Hace uso de ceil para obtener el tamaño del Grid de Bloques
int grid_calc(int Width, int Tile_Width) {
double x = Width;
double y = Tile_Width;
return (int)(ceil(x / y));//redondea hacia arriba la división
}
void imprimeMatriz(int *v, int m, int n) {//( m * n )
int i, j, x;
int ws;//numero de espacios de caracteres por casilla
printf("\n");
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
ws = 5;
x = v[i*m + j];
if (x < 0) {//si es negativo, se ocupa un hueco por el signo "-"
ws--;
x = -1 * x;
}
else {//para alinear los dígitos
ws--;
printf(" ");
}
do {//Se ocupa un hueco por digito del numero
ws--;
x = x / 10;
} while (x > 0);
printf("%d", v[i*m + j]);//imprimimos el numero
while (ws > 0) {//y ocupamos el resto de huecos con espacios en blanco
printf(" ");
ws--;
}
}
printf("\n");
}
}
void imprimeMatriz(int *v, int m) {//Para matrices cuadradas ( m * m )
int i, j, x;
int ws;//numero de espacios de caracteres por casilla
printf("\n");
for (i = 0; i < m; i++) {
for (j = 0; j < m; j++) {
ws = 5;
x = v[i*m + j];
if (x < 0) {//si es negativo, se ocupa un hueco por el signo "-"
ws--;
x = -1 * x;
}
else {//para alinear los dígitos
ws--;
printf(" ");
}
do {//Se ocupa un hueco por digito del numero
ws--;
x = x / 10;
} while (x > 0);
printf("%d", v[i*m + j]);//imprimimos el numero
while (ws > 0) {//y ocupamos el resto de huecos con espacios en blanco
printf(" ");
ws--;
}
}
printf("\n");
}
}
void generaMatriz(int *v, int m, int n, int max, int min) {//( m * n )
int i, j;
for (i = 0; i < m; i++) {
for (j = 0; j < n; j++) {
v[i*n + j] = (rand() % (max - min)) + min;
}
}
}
void generaMatriz(int *v, int m, int max, int min) {//Para matrices cuadradas ( m * m )
int i, j;
for (i = 0; i < m; i++) {
for (j = 0; j < m; j++) {
v[i*m + j] = (rand() % (max - min)) + min;
}
}
}
int main()
{
/*const int Width = 6;//Pruebas
int a[6 * 6] = { 8, 7, 3, -4, -2, -3,
8, 9, -6, 3, -1, -4,
-6, -1, -10, -7, 8, 6,
4, -6, -6, -3, 8, 7,
-1, -1, -7, -8, -1, 9,
7, 8, 3, 7, 2, 3 };
int b[6 * 6] = { 0, 9, 9, 5, -10, 6,
-2, 3, 8, 4, 0, -9,
7, 1, -8, -9, -10, -9,
-3, -5, 7, -2, 6, 4,
7, 9, -3, -9, -9, 6,
6, 6, 4, -8, 8, -5 };
//Resultado de a * b =
// -13 80 70 91 -140 -55
// -100 45 200 165 -25 47
// 45 76 -31 -50 94 53
// 77 141 19 -72 -14 133
// 24 66 22 7 113 -17
// 16 91 158 -16 -52 -32*/
srand((unsigned int)time(0));
const int max = 10;
const int min = -10;
const int Width = 16;
int a[Width * Width] = { 0 };
generaMatriz(a, Width, max, min);
int b[Width * Width] = { 0 };
generaMatriz(b, Width, max, min);
int c[Width * Width] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = prodMatricesCuda(c, a, b, Width);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("\n\tMatriz A\n");
imprimeMatriz(a, Width);
printf("\n\tMatriz B\n");
imprimeMatriz(b, Width);
printf("\n\tResultado del producto:\n");
imprimeMatriz(c, Width);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
dim3 DimGrid(grid_calc(Width, TILE_WIDTH), grid_calc(Width, TILE_WIDTH));
dim3 DimBlock(TILE_WIDTH, TILE_WIDTH);
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, Width * Width * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, Width * Width * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, Width * Width * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, Width * Width * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, Width * Width * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
productoKernel<<<DimGrid, DimBlock>>>(dev_c, dev_a, dev_b, Width);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, Width * Width * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14productoKernelPiPKiS1_j
.globl _Z14productoKernelPiPKiS1_j
.p2align 8
.type _Z14productoKernelPiPKiS1_j,@function
_Z14productoKernelPiPKiS1_j:
s_load_b32 s2, s[0:1], 0x18
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshl_add_u32 v6, s15, 2, v1
v_lshl_add_u32 v0, s14, 2, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_max_u32_e32 v1, v6, v0
s_waitcnt lgkmcnt(0)
v_cmpx_gt_u32_e64 s2, v1
s_cbranch_execz .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x8
v_mul_lo_u32 v7, v6, s2
v_mov_b32_e32 v3, 0
v_mov_b32_e32 v1, v0
s_mov_b32 s3, 0
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_3) | instid1(SALU_CYCLE_1)
v_dual_mov_b32 v5, 0 :: v_dual_add_nc_u32 v4, s3, v7
s_add_i32 s3, s3, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_eq_u32 s2, s3
v_mov_b32_e32 v2, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[4:5], 2, v[4:5]
v_lshlrev_b64 v[8:9], 2, v[1:2]
v_add_nc_u32_e32 v1, s2, v1
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v4, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v8, vcc_lo, s6, v8
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v9, vcc_lo
global_load_b32 v2, v[4:5], off
global_load_b32 v8, v[8:9], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[4:5], null, v8, v2, v[3:4]
s_delay_alu instid0(VALU_DEP_1)
v_mov_b32_e32 v3, v4
s_cbranch_scc0 .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x0
v_mad_u64_u32 v[1:2], null, v6, s2, v[0:1]
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v4, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14productoKernelPiPKiS1_j
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 28
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14productoKernelPiPKiS1_j, .Lfunc_end0-_Z14productoKernelPiPKiS1_j
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 28
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14productoKernelPiPKiS1_j
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14productoKernelPiPKiS1_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /* Enunciado:
* Multiplicacion de Matrices MxN (16x16) por Bloques en CUDA
*/
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
hipError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width);
const int TILE_WIDTH = 4;//Se ha establecido un tamaño de tesela de 4 hilos
__global__ void productoKernel(int *c, const int *a, const int *b, unsigned int Width)
{
int id_fil = blockIdx.y * TILE_WIDTH + threadIdx.y;
int id_col = blockIdx.x * TILE_WIDTH + threadIdx.x;
int n = 0;
if ((id_fil < Width) && (id_col < Width)) {//Si el hilo está fuera de los valores, no debe actuar
for (int i = 0; i < Width; i++) {
n = n + (a[id_fil*Width + i] * b[i*Width + id_col]);
}
c[id_fil*Width + id_col] = n;
}
}
//Hace uso de ceil para obtener el tamaño del Grid de Bloques
int grid_calc(int Width, int Tile_Width) {
double x = Width;
double y = Tile_Width;
return (int)(ceil(x / y));//redondea hacia arriba la división
}
// Prints an m x n row-major matrix, padding every cell to a fixed width of
// 5 characters so the columns line up.
// Fix: rows of an m x n row-major matrix are n elements long, so the element
// at (i, j) is v[i*n + j]; the original read v[i*m + j], which indexes the
// wrong element whenever m != n (generaMatriz(v, m, n, ...) writes v[i*n + j]).
void imprimeMatriz(int *v, int m, int n) {//( m * n )
	int i, j, x;
	int ws;//character slots remaining in the current cell
	printf("\n");
	for (i = 0; i < m; i++) {
		for (j = 0; j < n; j++) {
			ws = 5;
			x = v[i*n + j];//row stride is n, not m
			if (x < 0) {//negative: one slot is taken by the '-' sign
				ws--;
				x = -1 * x;
			}
			else {//non-negative: leading blank keeps digits aligned
				ws--;
				printf(" ");
			}
			do {//one slot per digit of the number
				ws--;
				x = x / 10;
			} while (x > 0);
			printf("%d", v[i*n + j]);//print the value itself
			while (ws > 0) {//pad the rest of the cell with blanks
				printf(" ");
				ws--;
			}
		}
		printf("\n");
	}
}
// Prints a square m x m row-major matrix, padding every cell to a fixed
// width of 5 characters so the columns line up.
void imprimeMatriz(int *v, int m) {//For square matrices ( m * m )
	int i, j, x;
	int ws;//character slots remaining in the current cell
	printf("\n");
	for (i = 0; i < m; i++) {
		for (j = 0; j < m; j++) {
			ws = 5;
			x = v[i*m + j];
			if (x < 0) {//negative: one slot is taken by the '-' sign
				ws--;
				x = -1 * x;
			}
			else {//non-negative: leading blank keeps digits aligned
				ws--;
				printf(" ");
			}
			do {//one slot per digit of the number
				ws--;
				x = x / 10;
			} while (x > 0);
			printf("%d", v[i*m + j]);//print the value itself
			while (ws > 0) {//pad the rest of the cell with blanks
				printf(" ");
				ws--;
			}
		}
		printf("\n");
	}
}
// Fills the m x n row-major matrix v with pseudo-random integers drawn
// from the half-open range [min, max).
void generaMatriz(int *v, int m, int n, int max, int min) {//( m * n )
	int range = max - min;
	for (int fila = 0; fila < m; fila++) {
		int *row = v + fila * n;//start of row 'fila'
		for (int col = 0; col < n; col++) {
			row[col] = min + rand() % range;
		}
	}
}
// Fills the square m x m row-major matrix v with pseudo-random integers
// drawn from the half-open range [min, max).
void generaMatriz(int *v, int m, int max, int min) {//For square matrices ( m * m )
	int range = max - min;
	for (int fila = 0; fila < m; fila++) {
		int *row = v + fila * m;//start of row 'fila'
		for (int col = 0; col < m; col++) {
			row[col] = min + rand() % range;
		}
	}
}
// Entry point: generates two random 16x16 matrices, multiplies them on the
// GPU via prodMatricesCuda, and prints A, B, and the product. Returns 0 on
// success, 1 if any HIP call fails.
int main()
{
	/*const int Width = 6;//Hand-checked test data
	int a[6 * 6] = { 8, 7, 3, -4, -2, -3,
	8, 9, -6, 3, -1, -4,
	-6, -1, -10, -7, 8, 6,
	4, -6, -6, -3, 8, 7,
	-1, -1, -7, -8, -1, 9,
	7, 8, 3, 7, 2, 3 };
	int b[6 * 6] = { 0, 9, 9, 5, -10, 6,
	-2, 3, 8, 4, 0, -9,
	7, 1, -8, -9, -10, -9,
	-3, -5, 7, -2, 6, 4,
	7, 9, -3, -9, -9, 6,
	6, 6, 4, -8, 8, -5 };
	//Expected result of a * b =
	// -13 80 70 91 -140 -55
	// -100 45 200 165 -25 47
	// 45 76 -31 -50 94 53
	// 77 141 19 -72 -14 133
	// 24 66 22 7 113 -17
	// 16 91 158 -16 -52 -32*/
	srand((unsigned int)time(0));//seed the PRNG so each run uses fresh matrices
	const int max = 10;//random entries lie in [min, max)
	const int min = -10;
	const int Width = 16;//matrices are Width x Width
	int a[Width * Width] = { 0 };
	generaMatriz(a, Width, max, min);
	int b[Width * Width] = { 0 };
	generaMatriz(b, Width, max, min);
	int c[Width * Width] = { 0 };
	// Compute c = a * b on the GPU.
	hipError_t cudaStatus = prodMatricesCuda(c, a, b, Width);
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "addWithCuda failed!");
		return 1;
	}
	printf("\n\tMatriz A\n");
	imprimeMatriz(a, Width);
	printf("\n\tMatriz B\n");
	imprimeMatriz(b, Width);
	printf("\n\tResultado del producto:\n");
	imprimeMatriz(c, Width);
	// hipDeviceReset must be called before exiting in order for profiling and
	// tracing tools such as Nsight and Visual Profiler to show complete traces.
	cudaStatus = hipDeviceReset();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipDeviceReset failed!");
		return 1;
	}
	return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Host-side driver for the GPU matrix product c = a * b (Width x Width).
// Allocates three device buffers, copies a and b over, launches
// productoKernel on a grid of ceil(Width/TILE_WIDTH)^2 blocks of
// TILE_WIDTH x TILE_WIDTH threads, and copies the result back into c.
// Returns hipSuccess, or the first HIP error encountered. Every exit path
// goes through the Error label so all device buffers are released
// (hipFree on a still-null pointer is a harmless no-op).
hipError_t prodMatricesCuda(int *c, const int *a, const int *b, unsigned int Width)
{
	int *dev_a = 0;
	int *dev_b = 0;
	int *dev_c = 0;
	dim3 DimGrid(grid_calc(Width, TILE_WIDTH), grid_calc(Width, TILE_WIDTH));
	dim3 DimBlock(TILE_WIDTH, TILE_WIDTH);
	hipError_t cudaStatus;
	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = hipSetDevice(0);
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
		goto Error;
	}
	// Allocate GPU buffers for the three matrices (two input, one output).
	cudaStatus = hipMalloc((void**)&dev_c, Width * Width * sizeof(int));
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipMalloc failed!");
		goto Error;
	}
	cudaStatus = hipMalloc((void**)&dev_a, Width * Width * sizeof(int));
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipMalloc failed!");
		goto Error;
	}
	cudaStatus = hipMalloc((void**)&dev_b, Width * Width * sizeof(int));
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipMalloc failed!");
		goto Error;
	}
	// Copy the input matrices from host memory to the GPU buffers.
	cudaStatus = hipMemcpy(dev_a, a, Width * Width * sizeof(int), hipMemcpyHostToDevice);
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipMemcpy failed!");
		goto Error;
	}
	cudaStatus = hipMemcpy(dev_b, b, Width * Width * sizeof(int), hipMemcpyHostToDevice);
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipMemcpy failed!");
		goto Error;
	}
	// Launch the kernel: one thread per output element.
	productoKernel<<<DimGrid, DimBlock>>>(dev_c, dev_a, dev_b, Width);
	// Check for any errors launching the kernel.
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
		goto Error;
	}
	// hipDeviceSynchronize waits for the kernel to finish, and returns
	// any errors encountered during the launch.
	cudaStatus = hipDeviceSynchronize();
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
		goto Error;
	}
	// Copy the product from the GPU buffer back to host memory.
	cudaStatus = hipMemcpy(c, dev_c, Width * Width * sizeof(int), hipMemcpyDeviceToHost);
	if (cudaStatus != hipSuccess) {
		fprintf(stderr, "hipMemcpy failed!");
		goto Error;
	}
Error:
	hipFree(dev_c);
	hipFree(dev_a);
	hipFree(dev_b);
	return cudaStatus;
}
.file "kernel.hip"
.globl _Z29__device_stub__productoKernelPiPKiS1_j # -- Begin function _Z29__device_stub__productoKernelPiPKiS1_j
.p2align 4, 0x90
.type _Z29__device_stub__productoKernelPiPKiS1_j,@function
_Z29__device_stub__productoKernelPiPKiS1_j: # @_Z29__device_stub__productoKernelPiPKiS1_j
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14productoKernelPiPKiS1_j, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z29__device_stub__productoKernelPiPKiS1_j, .Lfunc_end0-_Z29__device_stub__productoKernelPiPKiS1_j
.cfi_endproc
# -- End function
	.globl	_Z9grid_calcii                  # -- Begin function _Z9grid_calcii
	.p2align	4, 0x90
	.type	_Z9grid_calcii,@function
# int grid_calc(int Width, int Tile_Width)
# SysV AMD64: edi = Width, esi = Tile_Width; returns eax.
# Computes (int)ceil((double)Width / (double)Tile_Width) via libm ceil().
_Z9grid_calcii:                         # @_Z9grid_calcii
	.cfi_startproc
# %bb.0:
	pushq	%rax                            # realign rsp to 16 before the call
	.cfi_def_cfa_offset 16
	cvtsi2sd	%edi, %xmm0             # xmm0 = (double)Width
	cvtsi2sd	%esi, %xmm1             # xmm1 = (double)Tile_Width
	divsd	%xmm1, %xmm0                    # xmm0 = Width / Tile_Width
	callq	ceil@PLT
	cvttsd2si	%xmm0, %eax             # truncate rounded-up quotient to int
	popq	%rcx                            # undo alignment push (value unused)
	.cfi_def_cfa_offset 8
	retq
.Lfunc_end1:
	.size	_Z9grid_calcii, .Lfunc_end1-_Z9grid_calcii
	.cfi_endproc
                                        # -- End function
.globl _Z13imprimeMatrizPiii # -- Begin function _Z13imprimeMatrizPiii
.p2align 4, 0x90
.type _Z13imprimeMatrizPiii,@function
_Z13imprimeMatrizPiii: # @_Z13imprimeMatrizPiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $40, %rsp
.cfi_def_cfa_offset 96
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, 12(%rsp) # 4-byte Spill
movl %esi, %ebx
movq %rdi, 24(%rsp) # 8-byte Spill
movl $10, %edi
callq putchar@PLT
movl %ebx, 8(%rsp) # 4-byte Spill
testl %ebx, %ebx
jle .LBB2_13
# %bb.1: # %.preheader.lr.ph
movl 8(%rsp), %eax # 4-byte Reload
movq %rax, 32(%rsp) # 8-byte Spill
movl 12(%rsp), %r12d # 4-byte Reload
xorl %eax, %eax
movq %rax, 16(%rsp) # 8-byte Spill
jmp .LBB2_2
.p2align 4, 0x90
.LBB2_12: # %._crit_edge35
# in Loop: Header=BB2_2 Depth=1
movl $10, %edi
callq putchar@PLT
movq 16(%rsp), %rcx # 8-byte Reload
incq %rcx
movq %rcx, %rax
movq %rcx, 16(%rsp) # 8-byte Spill
cmpq 32(%rsp), %rcx # 8-byte Folded Reload
je .LBB2_13
.LBB2_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_4 Depth 2
# Child Loop BB2_8 Depth 3
# Child Loop BB2_10 Depth 3
cmpl $0, 12(%rsp) # 4-byte Folded Reload
jle .LBB2_12
# %bb.3: # %.lr.ph34
# in Loop: Header=BB2_2 Depth=1
movq 16(%rsp), %rax # 8-byte Reload
# kill: def $eax killed $eax killed $rax def $rax
imull 8(%rsp), %eax # 4-byte Folded Reload
movq 24(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r14
xorl %ebp, %ebp
jmp .LBB2_4
.p2align 4, 0x90
.LBB2_11: # %._crit_edge
# in Loop: Header=BB2_4 Depth=2
incq %rbp
cmpq %r12, %rbp
je .LBB2_12
.LBB2_4: # Parent Loop BB2_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_8 Depth 3
# Child Loop BB2_10 Depth 3
movl (%r14,%rbp,4), %r15d
testl %r15d, %r15d
js .LBB2_5
# %bb.6: # in Loop: Header=BB2_4 Depth=2
movl $32, %edi
callq putchar@PLT
jmp .LBB2_7
.p2align 4, 0x90
.LBB2_5: # in Loop: Header=BB2_4 Depth=2
negl %r15d
.LBB2_7: # %.preheader44
# in Loop: Header=BB2_4 Depth=2
movl $5, %r13d
movl $4, %eax
.p2align 4, 0x90
.LBB2_8: # Parent Loop BB2_2 Depth=1
# Parent Loop BB2_4 Depth=2
# => This Inner Loop Header: Depth=3
movl %eax, %ebx
movslq %r15d, %rcx
imulq $1717986919, %rcx, %r15 # imm = 0x66666667
movq %r15, %rax
shrq $63, %rax
sarq $34, %r15
addl %eax, %r15d
decl %r13d
leal -1(%rbx), %eax
cmpl $9, %ecx
jg .LBB2_8
# %bb.9: # in Loop: Header=BB2_4 Depth=2
movl (%r14,%rbp,4), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
cmpl $2, %r13d
jl .LBB2_11
.p2align 4, 0x90
.LBB2_10: # %.lr.ph
# Parent Loop BB2_2 Depth=1
# Parent Loop BB2_4 Depth=2
# => This Inner Loop Header: Depth=3
movl $32, %edi
callq putchar@PLT
decl %ebx
cmpl $1, %ebx
jg .LBB2_10
jmp .LBB2_11
.LBB2_13: # %._crit_edge37
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z13imprimeMatrizPiii, .Lfunc_end2-_Z13imprimeMatrizPiii
.cfi_endproc
# -- End function
.globl _Z13imprimeMatrizPii # -- Begin function _Z13imprimeMatrizPii
.p2align 4, 0x90
.type _Z13imprimeMatrizPii,@function
_Z13imprimeMatrizPii: # @_Z13imprimeMatrizPii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %esi, %ebx
movq %rdi, 8(%rsp) # 8-byte Spill
movl $10, %edi
callq putchar@PLT
movl %ebx, 4(%rsp) # 4-byte Spill
testl %ebx, %ebx
jle .LBB3_12
# %bb.1: # %.preheader.lr.ph
movl 4(%rsp), %r15d # 4-byte Reload
xorl %eax, %eax
jmp .LBB3_2
.p2align 4, 0x90
.LBB3_11: # %._crit_edge35
# in Loop: Header=BB3_2 Depth=1
movl $10, %edi
callq putchar@PLT
movq 16(%rsp), %rax # 8-byte Reload
incq %rax
cmpq %r15, %rax
je .LBB3_12
.LBB3_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_3 Depth 2
# Child Loop BB3_7 Depth 3
# Child Loop BB3_9 Depth 3
movq %rax, 16(%rsp) # 8-byte Spill
# kill: def $eax killed $eax killed $rax def $rax
imull 4(%rsp), %eax # 4-byte Folded Reload
movq 8(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
xorl %ebp, %ebp
jmp .LBB3_3
.p2align 4, 0x90
.LBB3_10: # %._crit_edge
# in Loop: Header=BB3_3 Depth=2
incq %rbp
cmpq %r15, %rbp
je .LBB3_11
.LBB3_3: # Parent Loop BB3_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB3_7 Depth 3
# Child Loop BB3_9 Depth 3
movl (%r13,%rbp,4), %ebx
testl %ebx, %ebx
js .LBB3_4
# %bb.5: # in Loop: Header=BB3_3 Depth=2
movl $32, %edi
callq putchar@PLT
jmp .LBB3_6
.p2align 4, 0x90
.LBB3_4: # in Loop: Header=BB3_3 Depth=2
negl %ebx
.LBB3_6: # %.preheader44
# in Loop: Header=BB3_3 Depth=2
movl $5, %r12d
movl $4, %eax
.p2align 4, 0x90
.LBB3_7: # Parent Loop BB3_2 Depth=1
# Parent Loop BB3_3 Depth=2
# => This Inner Loop Header: Depth=3
movl %eax, %r14d
movslq %ebx, %rcx
imulq $1717986919, %rcx, %rbx # imm = 0x66666667
movq %rbx, %rax
shrq $63, %rax
sarq $34, %rbx
addl %eax, %ebx
decl %r12d
leal -1(%r14), %eax
cmpl $9, %ecx
jg .LBB3_7
# %bb.8: # in Loop: Header=BB3_3 Depth=2
movl (%r13,%rbp,4), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
cmpl $2, %r12d
jl .LBB3_10
.p2align 4, 0x90
.LBB3_9: # %.lr.ph
# Parent Loop BB3_2 Depth=1
# Parent Loop BB3_3 Depth=2
# => This Inner Loop Header: Depth=3
movl $32, %edi
callq putchar@PLT
decl %r14d
cmpl $1, %r14d
jg .LBB3_9
jmp .LBB3_10
.LBB3_12: # %._crit_edge37
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z13imprimeMatrizPii, .Lfunc_end3-_Z13imprimeMatrizPii
.cfi_endproc
# -- End function
.globl _Z12generaMatrizPiiiii # -- Begin function _Z12generaMatrizPiiiii
.p2align 4, 0x90
.type _Z12generaMatrizPiiiii,@function
_Z12generaMatrizPiiiii: # @_Z12generaMatrizPiiiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $40, %rsp
.cfi_def_cfa_offset 96
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdi, 24(%rsp) # 8-byte Spill
testl %esi, %esi
jle .LBB4_6
# %bb.1: # %.preheader.lr.ph
movl %r8d, %ebx
movl %ecx, %ebp
subl %r8d, %ebp
movl %esi, %eax
movq %rax, 32(%rsp) # 8-byte Spill
movl %edx, %r13d
xorl %eax, %eax
movq %rax, 8(%rsp) # 8-byte Spill
xorl %r12d, %r12d
movl %edx, 20(%rsp) # 4-byte Spill
jmp .LBB4_2
.p2align 4, 0x90
.LBB4_5: # %._crit_edge
# in Loop: Header=BB4_2 Depth=1
incq %r12
movl 20(%rsp), %edx # 4-byte Reload
movq 8(%rsp), %rax # 8-byte Reload
addl %edx, %eax
movq %rax, 8(%rsp) # 8-byte Spill
cmpq 32(%rsp), %r12 # 8-byte Folded Reload
je .LBB4_6
.LBB4_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB4_4 Depth 2
testl %edx, %edx
jle .LBB4_5
# %bb.3: # %.lr.ph
# in Loop: Header=BB4_2 Depth=1
movl 8(%rsp), %eax # 4-byte Reload
movq 24(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB4_4: # Parent Loop BB4_2 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltd
idivl %ebp
addl %ebx, %edx
movl %edx, (%r14,%r15,4)
incq %r15
cmpq %r15, %r13
jne .LBB4_4
jmp .LBB4_5
.LBB4_6: # %._crit_edge16
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z12generaMatrizPiiiii, .Lfunc_end4-_Z12generaMatrizPiiiii
.cfi_endproc
# -- End function
.globl _Z12generaMatrizPiiii # -- Begin function _Z12generaMatrizPiiii
.p2align 4, 0x90
.type _Z12generaMatrizPiiii,@function
_Z12generaMatrizPiiii: # @_Z12generaMatrizPiiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdi, 8(%rsp) # 8-byte Spill
movl %esi, 4(%rsp) # 4-byte Spill
testl %esi, %esi
jle .LBB5_5
# %bb.1: # %.preheader.lr.ph
movl %ecx, %ebx
movl %edx, %ebp
subl %ecx, %ebp
movl 4(%rsp), %r12d # 4-byte Reload
xorl %eax, %eax
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB5_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB5_3 Depth 2
movq %rax, 16(%rsp) # 8-byte Spill
movl %eax, %eax
movq 8(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r14
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB5_3: # Parent Loop BB5_2 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltd
idivl %ebp
addl %ebx, %edx
movl %edx, (%r14,%r13,4)
incq %r13
cmpq %r13, %r12
jne .LBB5_3
# %bb.4: # %._crit_edge
# in Loop: Header=BB5_2 Depth=1
incq %r15
movq 16(%rsp), %rax # 8-byte Reload
addl 4(%rsp), %eax # 4-byte Folded Reload
cmpq %r12, %r15
jne .LBB5_2
.LBB5_5: # %._crit_edge16
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size _Z12generaMatrizPiiii, .Lfunc_end5-_Z12generaMatrizPiiii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $3072, %rsp # imm = 0xC00
.cfi_def_cfa_offset 3104
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
xorl %r14d, %r14d
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
leaq 1024(%rsp), %rbx
movl $1024, %edx # imm = 0x400
movq %rbx, %rdi
xorl %esi, %esi
callq memset@PLT
.p2align 4, 0x90
.LBB6_1: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB6_2 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB6_2: # Parent Loop BB6_1 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $35, %rcx
addl %edx, %ecx
shll $2, %ecx
leal (%rcx,%rcx,4), %ecx
negl %ecx
addl %ecx, %eax
addl $-10, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq $16, %r15
jne .LBB6_2
# %bb.3: # %._crit_edge.i
# in Loop: Header=BB6_1 Depth=1
incq %r14
addq $64, %rbx
cmpq $16, %r14
jne .LBB6_1
# %bb.4: # %_Z12generaMatrizPiiii.exit
movq %rsp, %rbx
xorl %r14d, %r14d
movl $1024, %edx # imm = 0x400
movq %rbx, %rdi
xorl %esi, %esi
callq memset@PLT
.p2align 4, 0x90
.LBB6_5: # %.preheader.i6
# =>This Loop Header: Depth=1
# Child Loop BB6_6 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB6_6: # Parent Loop BB6_5 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $35, %rcx
addl %edx, %ecx
shll $2, %ecx
leal (%rcx,%rcx,4), %ecx
negl %ecx
addl %ecx, %eax
addl $-10, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq $16, %r15
jne .LBB6_6
# %bb.7: # %._crit_edge.i11
# in Loop: Header=BB6_5 Depth=1
incq %r14
addq $64, %rbx
cmpq $16, %r14
jne .LBB6_5
# %bb.8: # %_Z12generaMatrizPiiii.exit14
leaq 2048(%rsp), %r14
xorl %ebx, %ebx
movl $1024, %edx # imm = 0x400
movq %r14, %rdi
xorl %esi, %esi
callq memset@PLT
leaq 1024(%rsp), %rsi
movq %rsp, %rdx
movq %r14, %rdi
movl $16, %ecx
callq _Z16prodMatricesCudaPiPKiS1_j
testl %eax, %eax
jne .LBB6_9
# %bb.10:
movl $.Lstr, %edi
callq puts@PLT
leaq 1024(%rsp), %rdi
movl $16, %esi
callq _Z13imprimeMatrizPii
movl $.Lstr.1, %edi
callq puts@PLT
movq %rsp, %rdi
movl $16, %esi
callq _Z13imprimeMatrizPii
movl $.Lstr.2, %edi
callq puts@PLT
leaq 2048(%rsp), %rdi
movl $16, %esi
callq _Z13imprimeMatrizPii
callq hipDeviceReset
testl %eax, %eax
jne .LBB6_11
.LBB6_13:
movl %ebx, %eax
addq $3072, %rsp # imm = 0xC00
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB6_9:
.cfi_def_cfa_offset 3104
movq stderr(%rip), %rcx
movl $.L.str.3, %edi
movl $19, %esi
jmp .LBB6_12
.LBB6_11:
movq stderr(%rip), %rcx
movl $.L.str.7, %edi
movl $22, %esi
.LBB6_12:
movl $1, %edx
callq fwrite@PLT
movl $1, %ebx
jmp .LBB6_13
.Lfunc_end6:
.size main, .Lfunc_end6-main
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z16prodMatricesCudaPiPKiS1_j
.LCPI7_0:
.quad 0x3fd0000000000000 # double 0.25
.text
.globl _Z16prodMatricesCudaPiPKiS1_j
.p2align 4, 0x90
.type _Z16prodMatricesCudaPiPKiS1_j,@function
_Z16prodMatricesCudaPiPKiS1_j: # @_Z16prodMatricesCudaPiPKiS1_j
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $144, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %ecx, %ebp
movq %rdx, %r15
movq %rsi, %r12
movq %rdi, %rbx
movq $0, 16(%rsp)
movq $0, 8(%rsp)
movq $0, (%rsp)
cvtsi2sd %ecx, %xmm0
mulsd .LCPI7_0(%rip), %xmm0
callq ceil@PLT
movsd %xmm0, 32(%rsp) # 8-byte Spill
xorl %edi, %edi
callq hipSetDevice
testl %eax, %eax
jne .LBB7_18
# %bb.1:
movl %ebp, %r14d
imull %r14d, %r14d
shlq $2, %r14
movq %rsp, %rdi
movq %r14, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB7_12
# %bb.2:
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB7_12
# %bb.3:
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB7_12
# %bb.4:
movq 16(%rsp), %rdi
movq %r12, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB7_13
# %bb.5:
movq 8(%rsp), %rdi
movq %r15, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB7_13
# %bb.6:
cvttsd2si 32(%rsp), %eax # 8-byte Folded Reload
movq %rax, %rdi
shlq $32, %rdi
orq %rax, %rdi
movabsq $17179869188, %rdx # imm = 0x400000004
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB7_8
# %bb.7:
movq (%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %ebp, 28(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 28(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z14productoKernelPiPKiS1_j, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB7_8:
callq hipGetLastError
testl %eax, %eax
jne .LBB7_19
# %bb.9:
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB7_20
# %bb.10:
movq (%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
xorl %ebx, %ebx
testl %eax, %eax
je .LBB7_17
# %bb.11:
movq stderr(%rip), %rcx
movl $.L.str.10, %edi
movl $17, %esi
movl $1, %edx
movl %eax, %ebx
jmp .LBB7_16
.LBB7_12:
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.9, %edi
jmp .LBB7_14
.LBB7_13:
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.10, %edi
.LBB7_14:
movl $17, %esi
.LBB7_15:
movl $1, %edx
.LBB7_16:
callq fwrite@PLT
.LBB7_17:
movq (%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movl %ebx, %eax
addq $144, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB7_18:
.cfi_def_cfa_offset 192
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.8, %edi
movl $63, %esi
jmp .LBB7_15
.LBB7_19:
movq stderr(%rip), %r14
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
movq %r14, %rdi
movq %rax, %rdx
xorl %eax, %eax
callq fprintf
jmp .LBB7_17
.LBB7_20:
movq stderr(%rip), %rdi
movl $.L.str.12, %esi
movl %eax, %ebx
movl %eax, %edx
xorl %eax, %eax
callq fprintf
jmp .LBB7_17
.Lfunc_end7:
.size _Z16prodMatricesCudaPiPKiS1_j, .Lfunc_end7-_Z16prodMatricesCudaPiPKiS1_j
.cfi_endproc
# -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
# Module constructor (invoked via .init_array): registers the embedded fat
# binary with the HIP runtime exactly once, caches the handle, registers the
# productoKernel device stub, and queues __hip_module_dtor with atexit.
__hip_module_ctor:                      # @__hip_module_ctor
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp                       # scratch for out-params; keeps rsp 16-aligned
	.cfi_def_cfa_offset 48
	cmpq	$0, __hip_gpubin_handle(%rip)   # already registered?
	jne	.LBB8_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip) # cache the module handle
.LBB8_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)                 # zero the optional out-parameters
	movups	%xmm0, (%rsp)
	movl	$_Z14productoKernelPiPKiS1_j, %esi
	movl	$.L__unnamed_1, %edx            # mangled kernel-name string
	movl	$.L__unnamed_1, %ecx
	movl	$-1, %r8d
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL: schedule unregistration at exit
.Lfunc_end8:
	.size	__hip_module_ctor, .Lfunc_end8-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
# Module destructor (registered via atexit): unregisters the fat binary from
# the HIP runtime if it was ever registered, then clears the cached handle.
__hip_module_dtor:                      # @__hip_module_dtor
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi                      # nothing to do if never registered
	je	.LBB9_2
# %bb.1:
	pushq	%rax                            # realign rsp to 16 before the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)   # mark as unregistered
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB9_2:
	retq
.Lfunc_end9:
	.size	__hip_module_dtor, .Lfunc_end9-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type _Z14productoKernelPiPKiS1_j,@object # @_Z14productoKernelPiPKiS1_j
.section .rodata,"a",@progbits
.globl _Z14productoKernelPiPKiS1_j
.p2align 3, 0x0
_Z14productoKernelPiPKiS1_j:
.quad _Z29__device_stub__productoKernelPiPKiS1_j
.size _Z14productoKernelPiPKiS1_j, 8
.type .L.str.2,@object # @.str.2
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.2:
.asciz "%d"
.size .L.str.2, 3
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "addWithCuda failed!"
.size .L.str.3, 20
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "hipDeviceReset failed!"
.size .L.str.7, 23
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"
.size .L.str.8, 64
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "hipMalloc failed!"
.size .L.str.9, 18
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "hipMemcpy failed!"
.size .L.str.10, 18
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "addKernel launch failed: %s\n"
.size .L.str.11, 29
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "hipDeviceSynchronize returned error code %d after launching addKernel!\n"
.size .L.str.12, 72
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z14productoKernelPiPKiS1_j"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "\n\tMatriz A"
.size .Lstr, 11
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "\n\tMatriz B"
.size .Lstr.1, 11
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "\n\tResultado del producto:"
.size .Lstr.2, 26
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__productoKernelPiPKiS1_j
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14productoKernelPiPKiS1_j
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
		code for sm_80
Function : _Z14productoKernelPiPKiS1_j
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R17, SR_TID.X ; /* 0x0000000000117919 */
/* 0x000e280000002100 */
/*0030*/ S2R R19, SR_CTAID.Y ; /* 0x0000000000137919 */
/* 0x000e680000002600 */
/*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R16, R0, 0x4, R17 ; /* 0x0000000400107824 */
/* 0x001fca00078e0211 */
/*0060*/ ISETP.GE.U32.AND P0, PT, R16, c[0x0][0x178], PT ; /* 0x00005e0010007a0c */
/* 0x000fe40003f06070 */
/*0070*/ LEA R19, R19, R2, 0x2 ; /* 0x0000000213137211 */
/* 0x002fc800078e10ff */
/*0080*/ ISETP.GE.U32.OR P0, PT, R19, c[0x0][0x178], P0 ; /* 0x00005e0013007a0c */
/* 0x000fda0000706470 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IMAD.MOV.U32 R21, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff157624 */
/* 0x000fe200078e00ff */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ HFMA2.MMA R20, -RZ, RZ, 0, 0 ; /* 0x00000000ff147435 */
/* 0x000fe200000001ff */
/*00d0*/ IMAD.MOV.U32 R27, RZ, RZ, RZ ; /* 0x000000ffff1b7224 */
/* 0x000fe400078e00ff */
/*00e0*/ IADD3 R2, R21.reuse, -0x1, RZ ; /* 0xffffffff15027810 */
/* 0x040fe40007ffe0ff */
/*00f0*/ LOP3.LUT R18, R21, 0x3, RZ, 0xc0, !PT ; /* 0x0000000315127812 */
/* 0x000fe400078ec0ff */
/*0100*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f26070 */
/*0110*/ ISETP.NE.AND P0, PT, R18, RZ, PT ; /* 0x000000ff1200720c */
/* 0x000fd60003f05270 */
/*0120*/ @!P1 BRA 0x3c0 ; /* 0x0000029000009947 */
/* 0x000fea0003800000 */
/*0130*/ IADD3 R3, R17, c[0x0][0x178], RZ ; /* 0x00005e0011037a10 */
/* 0x000fe20007ffe0ff */
/*0140*/ IMAD R25, R19, R21, 0x3 ; /* 0x0000000313197424 */
/* 0x000fe200078e0215 */
/*0150*/ LEA R22, R21.reuse, R16.reuse, 0x1 ; /* 0x0000001015167211 */
/* 0x0c0fe200078e08ff */
/*0160*/ IMAD R24, R21, 0x3, R16 ; /* 0x0000000315187824 */
/* 0x000fe200078e0210 */
/*0170*/ IADD3 R23, R18, -c[0x0][0x178], RZ ; /* 0x80005e0012177a10 */
/* 0x000fe20007ffe0ff */
/*0180*/ IMAD.MOV.U32 R20, RZ, RZ, RZ ; /* 0x000000ffff147224 */
/* 0x000fe200078e00ff */
/*0190*/ MOV R26, R16 ; /* 0x00000010001a7202 */
/* 0x000fe20000000f00 */
/*01a0*/ IMAD R28, R0, 0x4, R3 ; /* 0x00000004001c7824 */
/* 0x000fe400078e0203 */
/*01b0*/ MOV R13, 0x4 ; /* 0x00000004000d7802 */
/* 0x000fe40000000f00 */
/*01c0*/ IADD3 R2, R25.reuse, -0x3, RZ ; /* 0xfffffffd19027810 */
/* 0x040fe40007ffe0ff */
/*01d0*/ IADD3 R4, R25.reuse, -0x2, RZ ; /* 0xfffffffe19047810 */
/* 0x040fe20007ffe0ff */
/*01e0*/ IMAD.WIDE.U32 R14, R26, R13, c[0x0][0x170] ; /* 0x00005c001a0e7625 */
/* 0x000fe200078e000d */
/*01f0*/ IADD3 R8, R25, -0x1, RZ ; /* 0xffffffff19087810 */
/* 0x000fc60007ffe0ff */
/*0200*/ IMAD.WIDE.U32 R2, R2, R13.reuse, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x080fe400078e000d */
/*0210*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ea4000c1e1900 */
/*0220*/ IMAD.WIDE.U32 R4, R4, R13.reuse, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x080fe400078e000d */
/*0230*/ LDG.E R29, [R2.64] ; /* 0x00000004021d7981 */
/* 0x0000a4000c1e1900 */
/*0240*/ IMAD.WIDE.U32 R6, R28, R13.reuse, c[0x0][0x170] ; /* 0x00005c001c067625 */
/* 0x080fe400078e000d */
/*0250*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ee8000c1e1900 */
/*0260*/ LDG.E R7, [R6.64] ; /* 0x0000000406077981 */
/* 0x000ee2000c1e1900 */
/*0270*/ IMAD.WIDE.U32 R8, R8, R13, c[0x0][0x168] ; /* 0x00005a0008087625 */
/* 0x000fc800078e000d */
/*0280*/ IMAD.WIDE.U32 R2, R22, R13.reuse, c[0x0][0x170] ; /* 0x00005c0016027625 */
/* 0x081fe400078e000d */
/*0290*/ LDG.E R8, [R8.64] ; /* 0x0000000408087981 */
/* 0x000f24000c1e1900 */
/*02a0*/ IMAD.WIDE.U32 R10, R25, R13.reuse, c[0x0][0x168] ; /* 0x00005a00190a7625 */
/* 0x080fe400078e000d */
/*02b0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000f24000c1e1900 */
/*02c0*/ IMAD.WIDE.U32 R12, R24, R13, c[0x0][0x170] ; /* 0x00005c00180c7625 */
/* 0x000fe400078e000d */
/*02d0*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x000f68000c1e1900 */
/*02e0*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x000f62000c1e1900 */
/*02f0*/ IADD3 R20, R20, 0x4, RZ ; /* 0x0000000414147810 */
/* 0x000fe20007ffe0ff */
/*0300*/ IMAD R22, R21.reuse, 0x4, R22 ; /* 0x0000000415167824 */
/* 0x040fe200078e0216 */
/*0310*/ LEA R28, R21, R28, 0x2 ; /* 0x0000001c151c7211 */
/* 0x000fc400078e10ff */
/*0320*/ LEA R26, R21, R26, 0x2 ; /* 0x0000001a151a7211 */
/* 0x000fe400078e10ff */
/*0330*/ IADD3 R25, R25, 0x4, RZ ; /* 0x0000000419197810 */
/* 0x000fe40007ffe0ff */
/*0340*/ LEA R24, R21, R24, 0x2 ; /* 0x0000001815187211 */
/* 0x000fe200078e10ff */
/*0350*/ IMAD R14, R14, R29, R27 ; /* 0x0000001d0e0e7224 */
/* 0x004fc800078e021b */
/*0360*/ IMAD R5, R7, R4, R14 ; /* 0x0000000407057224 */
/* 0x008fe400078e020e */
/*0370*/ IMAD.IADD R4, R23, 0x1, R20 ; /* 0x0000000117047824 */
/* 0x000fca00078e0214 */
/*0380*/ ISETP.NE.AND P1, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe20003f25270 */
/*0390*/ IMAD R5, R2, R8, R5 ; /* 0x0000000802057224 */
/* 0x010fc800078e0205 */
/*03a0*/ IMAD R27, R12, R10, R5 ; /* 0x0000000a0c1b7224 */
/* 0x020fd000078e0205 */
/*03b0*/ @P1 BRA 0x1b0 ; /* 0xfffffdf000001947 */
/* 0x000fea000383ffff */
/*03c0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe400078e00ff */
/*03d0*/ IMAD R2, R19, c[0x0][0x178], R16 ; /* 0x00005e0013027a24 */
/* 0x000fc800078e0210 */
/*03e0*/ IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fe200078e0003 */
/*03f0*/ @!P0 BRA 0x4e0 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0400*/ IMAD R17, R20, c[0x0][0x178], R17 ; /* 0x00005e0014117a24 */
/* 0x000fe400078e0211 */
/*0410*/ IMAD R19, R19, c[0x0][0x178], R20 ; /* 0x00005e0013137a24 */
/* 0x000fc600078e0214 */
/*0420*/ LEA R0, R0, R17, 0x2 ; /* 0x0000001100007211 */
/* 0x000fe400078e10ff */
/*0430*/ HFMA2.MMA R6, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff067435 */
/* 0x000fd400000001ff */
/*0440*/ IMAD.WIDE.U32 R4, R19, R6, c[0x0][0x168] ; /* 0x00005a0013047625 */
/* 0x000fc800078e0006 */
/*0450*/ IMAD.WIDE.U32 R6, R0, R6, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fe400078e0006 */
/*0460*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0470*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ea2000c1e1900 */
/*0480*/ IADD3 R18, R18, -0x1, RZ ; /* 0xffffffff12127810 */
/* 0x000fe40007ffe0ff */
/*0490*/ IADD3 R19, R19, 0x1, RZ ; /* 0x0000000113137810 */
/* 0x000fe40007ffe0ff */
/*04a0*/ ISETP.NE.AND P0, PT, R18, RZ, PT ; /* 0x000000ff1200720c */
/* 0x000fc40003f05270 */
/*04b0*/ IADD3 R0, R0, c[0x0][0x178], RZ ; /* 0x00005e0000007a10 */
/* 0x000fe20007ffe0ff */
/*04c0*/ IMAD R27, R6, R4, R27 ; /* 0x00000004061b7224 */
/* 0x004fd400078e021b */
/*04d0*/ @P0 BRA 0x430 ; /* 0xffffff5000000947 */
/* 0x000fea000383ffff */
/*04e0*/ STG.E [R2.64], R27 ; /* 0x0000001b02007986 */
/* 0x000fe2000c101904 */
/*04f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0500*/ BRA 0x500; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14productoKernelPiPKiS1_j
.globl _Z14productoKernelPiPKiS1_j
.p2align 8
.type _Z14productoKernelPiPKiS1_j,@function
_Z14productoKernelPiPKiS1_j:
s_load_b32 s2, s[0:1], 0x18
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_mov_b32 s3, exec_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshl_add_u32 v6, s15, 2, v1
v_lshl_add_u32 v0, s14, 2, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_max_u32_e32 v1, v6, v0
s_waitcnt lgkmcnt(0)
v_cmpx_gt_u32_e64 s2, v1
s_cbranch_execz .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x8
v_mul_lo_u32 v7, v6, s2
v_mov_b32_e32 v3, 0
v_mov_b32_e32 v1, v0
s_mov_b32 s3, 0
.p2align 6
.LBB0_2:
s_delay_alu instid0(VALU_DEP_3) | instid1(SALU_CYCLE_1)
v_dual_mov_b32 v5, 0 :: v_dual_add_nc_u32 v4, s3, v7
s_add_i32 s3, s3, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_eq_u32 s2, s3
v_mov_b32_e32 v2, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[4:5], 2, v[4:5]
v_lshlrev_b64 v[8:9], 2, v[1:2]
v_add_nc_u32_e32 v1, s2, v1
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v4, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v8, vcc_lo, s6, v8
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v9, vcc_lo
global_load_b32 v2, v[4:5], off
global_load_b32 v8, v[8:9], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[4:5], null, v8, v2, v[3:4]
s_delay_alu instid0(VALU_DEP_1)
v_mov_b32_e32 v3, v4
s_cbranch_scc0 .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x0
v_mad_u64_u32 v[1:2], null, v6, s2, v[0:1]
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v4, off
.LBB0_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14productoKernelPiPKiS1_j
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 28
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14productoKernelPiPKiS1_j, .Lfunc_end0-_Z14productoKernelPiPKiS1_j
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 28
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14productoKernelPiPKiS1_j
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14productoKernelPiPKiS1_j.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000a2249_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2066:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2066:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9grid_calcii
.type _Z9grid_calcii, @function
_Z9grid_calcii:
.LFB2057:
.cfi_startproc
endbr64
pxor %xmm0, %xmm0
cvtsi2sdl %edi, %xmm0
pxor %xmm1, %xmm1
cvtsi2sdl %esi, %xmm1
divsd %xmm1, %xmm0
movapd %xmm0, %xmm3
movsd .LC3(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC0(%rip), %xmm4
ucomisd %xmm1, %xmm4
jbe .L4
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
cmpnlesd %xmm1, %xmm3
movsd .LC2(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm1, %xmm3
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm3
.L4:
cvttsd2sil %xmm3, %eax
ret
.cfi_endproc
.LFE2057:
.size _Z9grid_calcii, .-_Z9grid_calcii
.section .rodata.str1.1,"aMS",@progbits,1
.LC4:
.string "\n"
.LC5:
.string " "
.LC6:
.string "%d"
.text
.globl _Z13imprimeMatrizPiii
.type _Z13imprimeMatrizPiii, @function
_Z13imprimeMatrizPiii:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 16(%rsp)
movl %esi, %ebx
movl %edx, %r15d
movl %edx, 12(%rsp)
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %ebx, %ebx
jle .L5
movl $0, %r14d
movl $0, %ebp
movslq %r15d, %rax
movq %rax, 24(%rsp)
leaq .LC5(%rip), %r12
jmp .L7
.L8:
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L9:
movl $4, %ebx
.L10:
subl $1, %ebx
movl %ebp, %edx
movslq %ebp, %rax
imulq $1717986919, %rax, %rax
sarq $34, %rax
movl %ebp, %ecx
sarl $31, %ecx
subl %ecx, %eax
movl %eax, %ebp
cmpl $9, %edx
jg .L10
movl (%r14), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %ebx, %ebx
jle .L11
.L12:
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
subl $1, %ebx
jne .L12
.L11:
addq $4, %r13
cmpq %r15, %r13
je .L20
.L13:
movq %r13, %r14
movl 0(%r13), %ebp
testl %ebp, %ebp
jns .L8
negl %ebp
jmp .L9
.L20:
movl (%rsp), %ebp
movl 4(%rsp), %r14d
movl 8(%rsp), %ebx
.L15:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebp
addl %ebx, %r14d
cmpl %ebp, %ebx
je .L5
.L7:
cmpl $0, 12(%rsp)
jle .L15
movslq %r14d, %rax
movq 16(%rsp), %rdi
leaq (%rdi,%rax,4), %r13
movq 24(%rsp), %rsi
addq %rsi, %rax
leaq (%rdi,%rax,4), %r15
movl %ebp, (%rsp)
movl %r14d, 4(%rsp)
movl %ebx, 8(%rsp)
jmp .L13
.L5:
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z13imprimeMatrizPiii, .-_Z13imprimeMatrizPiii
.globl _Z13imprimeMatrizPii
.type _Z13imprimeMatrizPii, @function
_Z13imprimeMatrizPii:
.LFB2059:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, %r15
movl %esi, %ebx
movl %esi, 20(%rsp)
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %ebx, %ebx
jle .L21
movl %ebx, %eax
cltq
leaq 0(,%rax,4), %rdi
movq %rdi, 8(%rsp)
addq %rdi, %r15
negq %rax
salq $2, %rax
movq %rax, 24(%rsp)
movl $0, 16(%rsp)
leaq .LC5(%rip), %r12
jmp .L23
.L24:
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L25:
movl $4, %ebx
.L26:
subl $1, %ebx
movl %ebp, %edx
movslq %ebp, %rax
imulq $1717986919, %rax, %rax
sarq $34, %rax
movl %ebp, %ecx
sarl $31, %ecx
subl %ecx, %eax
movl %eax, %ebp
cmpl $9, %edx
jg .L26
movl (%r14), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %ebx, %ebx
jle .L27
.L28:
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
subl $1, %ebx
jne .L28
.L27:
addq $4, %r13
cmpq %r15, %r13
je .L34
.L29:
movq %r13, %r14
movl 0(%r13), %ebp
testl %ebp, %ebp
jns .L24
negl %ebp
jmp .L25
.L34:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, 16(%rsp)
movl 16(%rsp), %eax
movq 8(%rsp), %rdi
addq %rdi, %r15
cmpl %eax, 20(%rsp)
je .L21
.L23:
movq 24(%rsp), %rax
leaq (%r15,%rax), %r13
jmp .L29
.L21:
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z13imprimeMatrizPii, .-_Z13imprimeMatrizPii
.globl _Z12generaMatrizPiiiii
.type _Z12generaMatrizPiiiii, @function
_Z12generaMatrizPiiiii:
.LFB2060:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 8(%rsp)
movl %esi, 16(%rsp)
movl %edx, 4(%rsp)
movl %ecx, 20(%rsp)
testl %esi, %esi
jle .L35
movl %r8d, %r12d
movl $0, %r15d
movl $0, %r14d
movslq %edx, %rax
movq %rax, 24(%rsp)
jmp .L37
.L39:
movslq %r15d, %rax
movq 8(%rsp), %rcx
leaq (%rcx,%rax,4), %rbx
movq 24(%rsp), %rsi
addq %rsi, %rax
leaq (%rcx,%rax,4), %r13
movl 20(%rsp), %ebp
subl %r12d, %ebp
.L38:
call rand@PLT
cltd
idivl %ebp
addl %r12d, %edx
movl %edx, (%rbx)
addq $4, %rbx
cmpq %r13, %rbx
jne .L38
.L40:
addl $1, %r14d
movl 4(%rsp), %eax
addl %eax, %r15d
cmpl %r14d, 16(%rsp)
je .L35
.L37:
cmpl $0, 4(%rsp)
jg .L39
jmp .L40
.L35:
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z12generaMatrizPiiiii, .-_Z12generaMatrizPiiiii
.globl _Z12generaMatrizPiiii
.type _Z12generaMatrizPiiii, @function
_Z12generaMatrizPiiii:
.LFB2061:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movl %esi, 4(%rsp)
testl %esi, %esi
jle .L43
movl %ecx, %r13d
movslq %esi, %rax
leaq 0(,%rax,4), %r15
leaq (%rdi,%r15), %r12
negq %rax
salq $2, %rax
movq %rax, 8(%rsp)
movl $0, %r14d
subl %ecx, %edx
movl %edx, %ebp
.L45:
movq 8(%rsp), %rax
leaq (%r12,%rax), %rbx
.L46:
call rand@PLT
cltd
idivl %ebp
addl %r13d, %edx
movl %edx, (%rbx)
addq $4, %rbx
cmpq %r12, %rbx
jne .L46
addl $1, %r14d
addq %r15, %r12
cmpl %r14d, 4(%rsp)
jne .L45
.L43:
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _Z12generaMatrizPiiii, .-_Z12generaMatrizPiiii
.globl _Z41__device_stub__Z14productoKernelPiPKiS1_jPiPKiS1_j
.type _Z41__device_stub__Z14productoKernelPiPKiS1_jPiPKiS1_j, @function
_Z41__device_stub__Z14productoKernelPiPKiS1_jPiPKiS1_j:
.LFB2088:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L53
.L49:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L54
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L53:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14productoKernelPiPKiS1_j(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L49
.L54:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2088:
.size _Z41__device_stub__Z14productoKernelPiPKiS1_jPiPKiS1_j, .-_Z41__device_stub__Z14productoKernelPiPKiS1_jPiPKiS1_j
.globl _Z14productoKernelPiPKiS1_j
.type _Z14productoKernelPiPKiS1_j, @function
_Z14productoKernelPiPKiS1_j:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z14productoKernelPiPKiS1_jPiPKiS1_j
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _Z14productoKernelPiPKiS1_j, .-_Z14productoKernelPiPKiS1_j
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC7:
.string "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"
.section .rodata.str1.1
.LC8:
.string "cudaMalloc failed!"
.LC9:
.string "cudaMemcpy failed!"
.LC10:
.string "addKernel launch failed: %s\n"
.section .rodata.str1.8
.align 8
.LC11:
.string "cudaDeviceSynchronize returned error code %d after launching addKernel!\n"
.text
.globl _Z16prodMatricesCudaPiPKiS1_j
.type _Z16prodMatricesCudaPiPKiS1_j, @function
_Z16prodMatricesCudaPiPKiS1_j:
.LFB2063:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %rdi, %r14
movq %rsi, %r12
movq %rdx, %r13
movl %ecx, %ebp
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movq $0, 8(%rsp)
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movl $4, %esi
movl %ecx, %edi
call _Z9grid_calcii
movl %eax, 32(%rsp)
movl %eax, 36(%rsp)
movl $1, 40(%rsp)
movl $4, 44(%rsp)
movl $4, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %edi
call cudaSetDevice@PLT
testl %eax, %eax
jne .L70
movl %ebp, %r15d
imull %ebp, %r15d
salq $2, %r15
leaq 24(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L71
leaq 8(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L72
leaq 16(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L73
movl $1, %ecx
movq %r15, %rdx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L74
movl $1, %ecx
movq %r15, %rdx
movq %r13, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L75
movl 52(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movq 32(%rsp), %rdi
movl 40(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L76
.L65:
call cudaGetLastError@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L77
call cudaDeviceSynchronize@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L78
movl $2, %ecx
movq %r15, %rdx
movq 24(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
testl %eax, %eax
je .L59
leaq .LC9(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L59
.L70:
movl %eax, %ebx
leaq .LC7(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
.L59:
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L79
movl %ebx, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L71:
.cfi_restore_state
leaq .LC8(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L59
.L72:
leaq .LC8(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L59
.L73:
leaq .LC8(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L59
.L74:
leaq .LC9(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L59
.L75:
leaq .LC9(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L59
.L76:
movl %ebp, %ecx
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z41__device_stub__Z14productoKernelPiPKiS1_jPiPKiS1_j
jmp .L65
.L77:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC10(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L59
.L78:
movl %eax, %ecx
leaq .LC11(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L59
.L79:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2063:
.size _Z16prodMatricesCudaPiPKiS1_j, .-_Z16prodMatricesCudaPiPKiS1_j
.section .rodata.str1.1
.LC12:
.string "addWithCuda failed!"
.LC13:
.string "\n\tMatriz A\n"
.LC14:
.string "\n\tMatriz B\n"
.LC15:
.string "\n\tResultado del producto:\n"
.LC16:
.string "cudaDeviceReset failed!"
.text
.globl main
.type main, @function
main:
.LFB2062:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $3088, %rsp
.cfi_def_cfa_offset 3120
movq %fs:40, %rax
movq %rax, 3080(%rsp)
xorl %eax, %eax
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
movq %rsp, %rdi
movl $0, %ebx
movl $128, %ecx
movq %rbx, %rax
rep stosq
movq %rsp, %rbp
movl $-10, %ecx
movl $10, %edx
movl $16, %esi
movq %rbp, %rdi
call _Z12generaMatrizPiiii
leaq 1024(%rsp), %rdi
movl $128, %ecx
movq %rbx, %rax
rep stosq
leaq 1024(%rsp), %r12
movl $-10, %ecx
movl $10, %edx
movl $16, %esi
movq %r12, %rdi
call _Z12generaMatrizPiiii
leaq 2048(%rsp), %rdi
movl $128, %ecx
movq %rbx, %rax
rep stosq
leaq 2048(%rsp), %rdi
movl $16, %ecx
movq %r12, %rdx
movq %rbp, %rsi
call _Z16prodMatricesCudaPiPKiS1_j
testl %eax, %eax
jne .L86
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rsp, %rdi
movl $16, %esi
call _Z13imprimeMatrizPii
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 1024(%rsp), %rdi
movl $16, %esi
call _Z13imprimeMatrizPii
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 2048(%rsp), %rdi
movl $16, %esi
call _Z13imprimeMatrizPii
call cudaDeviceReset@PLT
movl %eax, %edx
movl $0, %eax
testl %edx, %edx
jne .L87
.L80:
movq 3080(%rsp), %rdx
subq %fs:40, %rdx
jne .L88
addq $3088, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L86:
.cfi_restore_state
leaq .LC12(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %eax
jmp .L80
.L87:
leaq .LC16(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
call __fprintf_chk@PLT
movl $1, %eax
jmp .L80
.L88:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2062:
.size main, .-main
.section .rodata.str1.1
.LC17:
.string "_Z14productoKernelPiPKiS1_j"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2091:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC17(%rip), %rdx
movq %rdx, %rcx
leaq _Z14productoKernelPiPKiS1_j(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2091:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1127219200
.align 8
.LC2:
.long 0
.long 1072693248
.align 8
.LC3:
.long -1
.long 2147483647
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernel.hip"
.globl _Z29__device_stub__productoKernelPiPKiS1_j # -- Begin function _Z29__device_stub__productoKernelPiPKiS1_j
.p2align 4, 0x90
.type _Z29__device_stub__productoKernelPiPKiS1_j,@function
_Z29__device_stub__productoKernelPiPKiS1_j: # @_Z29__device_stub__productoKernelPiPKiS1_j
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14productoKernelPiPKiS1_j, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z29__device_stub__productoKernelPiPKiS1_j, .Lfunc_end0-_Z29__device_stub__productoKernelPiPKiS1_j
.cfi_endproc
# -- End function
.globl _Z9grid_calcii # -- Begin function _Z9grid_calcii
.p2align 4, 0x90
.type _Z9grid_calcii,@function
_Z9grid_calcii: # @_Z9grid_calcii
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
cvtsi2sd %edi, %xmm0
cvtsi2sd %esi, %xmm1
divsd %xmm1, %xmm0
callq ceil@PLT
cvttsd2si %xmm0, %eax
popq %rcx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z9grid_calcii, .Lfunc_end1-_Z9grid_calcii
.cfi_endproc
# -- End function
.globl _Z13imprimeMatrizPiii # -- Begin function _Z13imprimeMatrizPiii
.p2align 4, 0x90
.type _Z13imprimeMatrizPiii,@function
_Z13imprimeMatrizPiii: # @_Z13imprimeMatrizPiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $40, %rsp
.cfi_def_cfa_offset 96
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %edx, 12(%rsp) # 4-byte Spill
movl %esi, %ebx
movq %rdi, 24(%rsp) # 8-byte Spill
movl $10, %edi
callq putchar@PLT
movl %ebx, 8(%rsp) # 4-byte Spill
testl %ebx, %ebx
jle .LBB2_13
# %bb.1: # %.preheader.lr.ph
movl 8(%rsp), %eax # 4-byte Reload
movq %rax, 32(%rsp) # 8-byte Spill
movl 12(%rsp), %r12d # 4-byte Reload
xorl %eax, %eax
movq %rax, 16(%rsp) # 8-byte Spill
jmp .LBB2_2
.p2align 4, 0x90
.LBB2_12: # %._crit_edge35
# in Loop: Header=BB2_2 Depth=1
movl $10, %edi
callq putchar@PLT
movq 16(%rsp), %rcx # 8-byte Reload
incq %rcx
movq %rcx, %rax
movq %rcx, 16(%rsp) # 8-byte Spill
cmpq 32(%rsp), %rcx # 8-byte Folded Reload
je .LBB2_13
.LBB2_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_4 Depth 2
# Child Loop BB2_8 Depth 3
# Child Loop BB2_10 Depth 3
cmpl $0, 12(%rsp) # 4-byte Folded Reload
jle .LBB2_12
# %bb.3: # %.lr.ph34
# in Loop: Header=BB2_2 Depth=1
movq 16(%rsp), %rax # 8-byte Reload
# kill: def $eax killed $eax killed $rax def $rax
imull 8(%rsp), %eax # 4-byte Folded Reload
movq 24(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r14
xorl %ebp, %ebp
jmp .LBB2_4
.p2align 4, 0x90
.LBB2_11: # %._crit_edge
# in Loop: Header=BB2_4 Depth=2
incq %rbp
cmpq %r12, %rbp
je .LBB2_12
.LBB2_4: # Parent Loop BB2_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_8 Depth 3
# Child Loop BB2_10 Depth 3
movl (%r14,%rbp,4), %r15d
testl %r15d, %r15d
js .LBB2_5
# %bb.6: # in Loop: Header=BB2_4 Depth=2
movl $32, %edi
callq putchar@PLT
jmp .LBB2_7
.p2align 4, 0x90
.LBB2_5: # in Loop: Header=BB2_4 Depth=2
negl %r15d
.LBB2_7: # %.preheader44
# in Loop: Header=BB2_4 Depth=2
movl $5, %r13d
movl $4, %eax
.p2align 4, 0x90
.LBB2_8: # Parent Loop BB2_2 Depth=1
# Parent Loop BB2_4 Depth=2
# => This Inner Loop Header: Depth=3
movl %eax, %ebx
movslq %r15d, %rcx
imulq $1717986919, %rcx, %r15 # imm = 0x66666667
movq %r15, %rax
shrq $63, %rax
sarq $34, %r15
addl %eax, %r15d
decl %r13d
leal -1(%rbx), %eax
cmpl $9, %ecx
jg .LBB2_8
# %bb.9: # in Loop: Header=BB2_4 Depth=2
movl (%r14,%rbp,4), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
cmpl $2, %r13d
jl .LBB2_11
.p2align 4, 0x90
.LBB2_10: # %.lr.ph
# Parent Loop BB2_2 Depth=1
# Parent Loop BB2_4 Depth=2
# => This Inner Loop Header: Depth=3
movl $32, %edi
callq putchar@PLT
decl %ebx
cmpl $1, %ebx
jg .LBB2_10
jmp .LBB2_11
.LBB2_13: # %._crit_edge37
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z13imprimeMatrizPiii, .Lfunc_end2-_Z13imprimeMatrizPiii
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# _Z13imprimeMatrizPii — imprimeMatriz(int *m, int n)
# Prints a leading '\n', then the n x n row-major int matrix at m.  Each
# element is printed with printf("%d") preceded/followed by enough spaces to
# right-pad it to a fixed column width; the digit count is obtained by
# repeated signed division by 10 (fixed-point reciprocal 0x66666667, >>34).
# ABI: SysV AMD64.  Calls putchar/printf; all callee-saved GPRs are spilled.
# ---------------------------------------------------------------------------
.globl _Z13imprimeMatrizPii # -- Begin function _Z13imprimeMatrizPii
.p2align 4, 0x90
.type _Z13imprimeMatrizPii,@function
_Z13imprimeMatrizPii: # @_Z13imprimeMatrizPii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
# Spill arguments: 8(%rsp) = m (rdi), 4(%rsp) = n (esi).
movl %esi, %ebx
movq %rdi, 8(%rsp) # 8-byte Spill
movl $10, %edi
callq putchar@PLT
movl %ebx, 4(%rsp) # 4-byte Spill
# n <= 0: nothing to print.
testl %ebx, %ebx
jle .LBB3_12
# %bb.1: # %.preheader.lr.ph
movl 4(%rsp), %r15d # 4-byte Reload
xorl %eax, %eax
jmp .LBB3_2
.p2align 4, 0x90
.LBB3_11: # %._crit_edge35
# in Loop: Header=BB3_2 Depth=1
# End of a row: putchar('\n'), advance row index, loop until row == n.
movl $10, %edi
callq putchar@PLT
movq 16(%rsp), %rax # 8-byte Reload
incq %rax
cmpq %r15, %rax
je .LBB3_12
.LBB3_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_3 Depth 2
# Child Loop BB3_7 Depth 3
# Child Loop BB3_9 Depth 3
# Outer loop over rows: r13 = &m[row * n], rbp = column index.
movq %rax, 16(%rsp) # 8-byte Spill
# kill: def $eax killed $eax killed $rax def $rax
imull 4(%rsp), %eax # 4-byte Folded Reload
movq 8(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
xorl %ebp, %ebp
jmp .LBB3_3
.p2align 4, 0x90
.LBB3_10: # %._crit_edge
# in Loop: Header=BB3_3 Depth=2
incq %rbp
cmpq %r15, %rbp
je .LBB3_11
.LBB3_3: # Parent Loop BB3_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB3_7 Depth 3
# Child Loop BB3_9 Depth 3
# ebx = m[row*n + col]; negative values take their magnitude for the
# digit count (the sign consumes the leading-space column).
movl (%r13,%rbp,4), %ebx
testl %ebx, %ebx
js .LBB3_4
# %bb.5: # in Loop: Header=BB3_3 Depth=2
movl $32, %edi
callq putchar@PLT
jmp .LBB3_6
.p2align 4, 0x90
.LBB3_4: # in Loop: Header=BB3_3 Depth=2
negl %ebx
.LBB3_6: # %.preheader44
# in Loop: Header=BB3_3 Depth=2
# Count digits of |element|: r12/eax count down while dividing by 10.
movl $5, %r12d
movl $4, %eax
.p2align 4, 0x90
.LBB3_7: # Parent Loop BB3_2 Depth=1
# Parent Loop BB3_3 Depth=2
# => This Inner Loop Header: Depth=3
movl %eax, %r14d
movslq %ebx, %rcx
# Signed division by 10 via multiply-high: floor(x * 0x66666667 / 2^34).
imulq $1717986919, %rcx, %rbx # imm = 0x66666667
movq %rbx, %rax
shrq $63, %rax
sarq $34, %rbx
addl %eax, %ebx
decl %r12d
leal -1(%r14), %eax
cmpl $9, %ecx
jg .LBB3_7
# %bb.8: # in Loop: Header=BB3_3 Depth=2
# printf("%d", element), then pad with the remaining spaces.
movl (%r13,%rbp,4), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
cmpl $2, %r12d
jl .LBB3_10
.p2align 4, 0x90
.LBB3_9: # %.lr.ph
# Parent Loop BB3_2 Depth=1
# Parent Loop BB3_3 Depth=2
# => This Inner Loop Header: Depth=3
movl $32, %edi
callq putchar@PLT
decl %r14d
cmpl $1, %r14d
jg .LBB3_9
jmp .LBB3_10
.LBB3_12: # %._crit_edge37
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z13imprimeMatrizPii, .Lfunc_end3-_Z13imprimeMatrizPii
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# _Z12generaMatrizPiiiii — generaMatriz(int *m, int rows, int cols, int hi, int lo)
# Fills the rows x cols row-major matrix m with rand() % (hi - lo) + lo.
# ABI: SysV AMD64.  Args: rdi = m, esi = rows, edx = cols, ecx = hi, r8d = lo.
# ---------------------------------------------------------------------------
.globl _Z12generaMatrizPiiiii # -- Begin function _Z12generaMatrizPiiiii
.p2align 4, 0x90
.type _Z12generaMatrizPiiiii,@function
_Z12generaMatrizPiiiii: # @_Z12generaMatrizPiiiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $40, %rsp
.cfi_def_cfa_offset 96
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdi, 24(%rsp) # 8-byte Spill
# rows <= 0: nothing to generate.
testl %esi, %esi
jle .LBB4_6
# %bb.1: # %.preheader.lr.ph
# ebx = lo, ebp = hi - lo (idiv modulus), r13 = cols, r12 = row index.
movl %r8d, %ebx
movl %ecx, %ebp
subl %r8d, %ebp
movl %esi, %eax
movq %rax, 32(%rsp) # 8-byte Spill
movl %edx, %r13d
xorl %eax, %eax
movq %rax, 8(%rsp) # 8-byte Spill
xorl %r12d, %r12d
movl %edx, 20(%rsp) # 4-byte Spill
jmp .LBB4_2
.p2align 4, 0x90
.LBB4_5: # %._crit_edge
# in Loop: Header=BB4_2 Depth=1
# Next row: linear index += cols.
incq %r12
movl 20(%rsp), %edx # 4-byte Reload
movq 8(%rsp), %rax # 8-byte Reload
addl %edx, %eax
movq %rax, 8(%rsp) # 8-byte Spill
cmpq 32(%rsp), %r12 # 8-byte Folded Reload
je .LBB4_6
.LBB4_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB4_4 Depth 2
testl %edx, %edx
jle .LBB4_5
# %bb.3: # %.lr.ph
# in Loop: Header=BB4_2 Depth=1
# r14 = &m[row * cols], r15 = column index.
movl 8(%rsp), %eax # 4-byte Reload
movq 24(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB4_4: # Parent Loop BB4_2 Depth=1
# => This Inner Loop Header: Depth=2
# m[row][col] = rand() % (hi - lo) + lo.  cltd sign-extends eax into
# edx:eax before idiv; the remainder lands in edx.
callq rand
cltd
idivl %ebp
addl %ebx, %edx
movl %edx, (%r14,%r15,4)
incq %r15
cmpq %r15, %r13
jne .LBB4_4
jmp .LBB4_5
.LBB4_6: # %._crit_edge16
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z12generaMatrizPiiiii, .Lfunc_end4-_Z12generaMatrizPiiiii
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# _Z12generaMatrizPiiii — generaMatriz(int *m, int n, int hi, int lo)
# Square-matrix variant: fills the n x n row-major matrix m with
# rand() % (hi - lo) + lo.
# ABI: SysV AMD64.  Args: rdi = m, esi = n, edx = hi, ecx = lo.
# ---------------------------------------------------------------------------
.globl _Z12generaMatrizPiiii # -- Begin function _Z12generaMatrizPiiii
.p2align 4, 0x90
.type _Z12generaMatrizPiiii,@function
_Z12generaMatrizPiiii: # @_Z12generaMatrizPiiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdi, 8(%rsp) # 8-byte Spill
movl %esi, 4(%rsp) # 4-byte Spill
# n <= 0: nothing to generate.
testl %esi, %esi
jle .LBB5_5
# %bb.1: # %.preheader.lr.ph
# ebx = lo, ebp = hi - lo, r12 = n, r15 = row index, rax = linear index.
movl %ecx, %ebx
movl %edx, %ebp
subl %ecx, %ebp
movl 4(%rsp), %r12d # 4-byte Reload
xorl %eax, %eax
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB5_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB5_3 Depth 2
# r14 = &m[row * n], r13 = column index.
movq %rax, 16(%rsp) # 8-byte Spill
movl %eax, %eax
movq 8(%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r14
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB5_3: # Parent Loop BB5_2 Depth=1
# => This Inner Loop Header: Depth=2
# m[row][col] = rand() % (hi - lo) + lo.
callq rand
cltd
idivl %ebp
addl %ebx, %edx
movl %edx, (%r14,%r13,4)
incq %r13
cmpq %r13, %r12
jne .LBB5_3
# %bb.4: # %._crit_edge
# in Loop: Header=BB5_2 Depth=1
incq %r15
movq 16(%rsp), %rax # 8-byte Reload
addl 4(%rsp), %eax # 4-byte Folded Reload
cmpq %r12, %r15
jne .LBB5_2
.LBB5_5: # %._crit_edge16
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size _Z12generaMatrizPiiii, .Lfunc_end5-_Z12generaMatrizPiiii
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# main — seeds rand with time(0), fills two 16x16 int matrices A (rsp+1024)
# and B (rsp+0) with rand() % 20 - 10 (generaMatriz inlined: % 20 is done via
# the 0x66666667 reciprocal, >>35), multiplies them on the GPU via
# _Z16prodMatricesCudaPiPKiS1_j into C (rsp+2048), prints all three, and
# calls hipDeviceReset.  Returns 0 on success, 1 on a reported failure.
# Stack frame: 3 KiB = three 1024-byte (16*16*4) matrices.
# ---------------------------------------------------------------------------
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $3072, %rsp # imm = 0xC00
.cfi_def_cfa_offset 3104
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
# srand(time(NULL))
xorl %r14d, %r14d
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
# Zero matrix A at rsp+1024, then fill it row by row.
leaq 1024(%rsp), %rbx
movl $1024, %edx # imm = 0x400
movq %rbx, %rdi
xorl %esi, %esi
callq memset@PLT
.p2align 4, 0x90
.LBB6_1: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB6_2 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB6_2: # Parent Loop BB6_1 Depth=1
# => This Inner Loop Header: Depth=2
# A[i][j] = rand() % 20 - 10: x - 20*(x/20) - 10, with x/20 computed as
# floor(x * 0x66666667 / 2^35).
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $35, %rcx
addl %edx, %ecx
shll $2, %ecx
leal (%rcx,%rcx,4), %ecx
negl %ecx
addl %ecx, %eax
addl $-10, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq $16, %r15
jne .LBB6_2
# %bb.3: # %._crit_edge.i
# in Loop: Header=BB6_1 Depth=1
incq %r14
addq $64, %rbx
cmpq $16, %r14
jne .LBB6_1
# %bb.4: # %_Z12generaMatrizPiiii.exit
# Zero matrix B at rsp+0 and fill it with the same distribution.
movq %rsp, %rbx
xorl %r14d, %r14d
movl $1024, %edx # imm = 0x400
movq %rbx, %rdi
xorl %esi, %esi
callq memset@PLT
.p2align 4, 0x90
.LBB6_5: # %.preheader.i6
# =>This Loop Header: Depth=1
# Child Loop BB6_6 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB6_6: # Parent Loop BB6_5 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $35, %rcx
addl %edx, %ecx
shll $2, %ecx
leal (%rcx,%rcx,4), %ecx
negl %ecx
addl %ecx, %eax
addl $-10, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq $16, %r15
jne .LBB6_6
# %bb.7: # %._crit_edge.i11
# in Loop: Header=BB6_5 Depth=1
incq %r14
addq $64, %rbx
cmpq $16, %r14
jne .LBB6_5
# %bb.8: # %_Z12generaMatrizPiiii.exit14
# Zero result matrix C at rsp+2048 and run the GPU product: C = A * B, n=16.
leaq 2048(%rsp), %r14
xorl %ebx, %ebx
movl $1024, %edx # imm = 0x400
movq %r14, %rdi
xorl %esi, %esi
callq memset@PLT
leaq 1024(%rsp), %rsi
movq %rsp, %rdx
movq %r14, %rdi
movl $16, %ecx
callq _Z16prodMatricesCudaPiPKiS1_j
testl %eax, %eax
jne .LBB6_9
# %bb.10:
# Success: print A, B and the product, then reset the device.
movl $.Lstr, %edi
callq puts@PLT
leaq 1024(%rsp), %rdi
movl $16, %esi
callq _Z13imprimeMatrizPii
movl $.Lstr.1, %edi
callq puts@PLT
movq %rsp, %rdi
movl $16, %esi
callq _Z13imprimeMatrizPii
movl $.Lstr.2, %edi
callq puts@PLT
leaq 2048(%rsp), %rdi
movl $16, %esi
callq _Z13imprimeMatrizPii
callq hipDeviceReset
testl %eax, %eax
jne .LBB6_11
.LBB6_13:
# Return ebx (0 on success, 1 after an error message).
movl %ebx, %eax
addq $3072, %rsp # imm = 0xC00
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB6_9:
.cfi_def_cfa_offset 3104
# GPU multiply failed: fwrite("addWithCuda failed!", 19, 1, stderr).
movq stderr(%rip), %rcx
movl $.L.str.3, %edi
movl $19, %esi
jmp .LBB6_12
.LBB6_11:
# hipDeviceReset failed: fwrite("hipDeviceReset failed!", 22, 1, stderr).
movq stderr(%rip), %rcx
movl $.L.str.7, %edi
movl $22, %esi
.LBB6_12:
movl $1, %edx
callq fwrite@PLT
movl $1, %ebx
jmp .LBB6_13
.Lfunc_end6:
.size main, .Lfunc_end6-main
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# _Z16prodMatricesCudaPiPKiS1_j — prodMatricesCuda(int *c, const int *a,
#                                                  const int *b, unsigned n)
# Host-side driver: allocates three n*n int device buffers, copies a and b to
# the device (hipMemcpyHostToDevice = 1), launches productoKernel on a
# ceil(n*0.25)^2 grid of 4x4 blocks, synchronizes, and copies the result back
# (hipMemcpyDeviceToHost = 2).  Returns 0 on success or the failing hip error
# code; all three device buffers are freed on every exit path.
# Stack slots: (rsp)=dev_c, 8(rsp)=dev_b, 16(rsp)=dev_a, 32(rsp)=ceil(n/4).
# ---------------------------------------------------------------------------
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z16prodMatricesCudaPiPKiS1_j
.LCPI7_0:
.quad 0x3fd0000000000000 # double 0.25
.text
.globl _Z16prodMatricesCudaPiPKiS1_j
.p2align 4, 0x90
.type _Z16prodMatricesCudaPiPKiS1_j,@function
_Z16prodMatricesCudaPiPKiS1_j: # @_Z16prodMatricesCudaPiPKiS1_j
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $144, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
# ebp = n, r15 = b, r12 = a, rbx = c; device pointers zero-initialized.
movl %ecx, %ebp
movq %rdx, %r15
movq %rsi, %r12
movq %rdi, %rbx
movq $0, 16(%rsp)
movq $0, 8(%rsp)
movq $0, (%rsp)
# Grid dimension = ceil(n * 0.25), kept as a double spill until launch.
cvtsi2sd %ecx, %xmm0
mulsd .LCPI7_0(%rip), %xmm0
callq ceil@PLT
movsd %xmm0, 32(%rsp) # 8-byte Spill
xorl %edi, %edi
callq hipSetDevice
testl %eax, %eax
jne .LBB7_18
# %bb.1:
# r14 = n * n * sizeof(int): the byte size of each device buffer.
movl %ebp, %r14d
imull %r14d, %r14d
shlq $2, %r14
movq %rsp, %rdi
movq %r14, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB7_12
# %bb.2:
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB7_12
# %bb.3:
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB7_12
# %bb.4:
# Copy a to the device (kind 1 = host-to-device).
movq 16(%rsp), %rdi
movq %r12, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB7_13
# %bb.5:
# Copy b to the device.
movq 8(%rsp), %rdi
movq %r15, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB7_13
# %bb.6:
# Push launch config: grid = (g, g, 1) with g = (int)ceil(n*0.25) packed
# into rdi, block = (4, 4, 1) packed as 0x400000004, no shared mem/stream.
cvttsd2si 32(%rsp), %eax # 8-byte Folded Reload
movq %rax, %rdi
shlq $32, %rdi
orq %rax, %rdi
movabsq $17179869188, %rdx # imm = 0x400000004
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB7_8
# %bb.7:
# Build the kernel argument array: {&dev_c, &dev_a, &dev_b, &n} and launch.
movq (%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl %ebp, 28(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 28(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z14productoKernelPiPKiS1_j, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB7_8:
callq hipGetLastError
testl %eax, %eax
jne .LBB7_19
# %bb.9:
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB7_20
# %bb.10:
# Copy the product back to c (kind 2 = device-to-host).
movq (%rsp), %rsi
movq %rbx, %rdi
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
xorl %ebx, %ebx
testl %eax, %eax
je .LBB7_17
# %bb.11:
movq stderr(%rip), %rcx
movl $.L.str.10, %edi
movl $17, %esi
movl $1, %edx
movl %eax, %ebx
jmp .LBB7_16
.LBB7_12:
# hipMalloc failure path.
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.9, %edi
jmp .LBB7_14
.LBB7_13:
# hipMemcpy (host-to-device) failure path.
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.10, %edi
.LBB7_14:
movl $17, %esi
.LBB7_15:
movl $1, %edx
.LBB7_16:
callq fwrite@PLT
.LBB7_17:
# Common cleanup: free all three device buffers, return ebx.
movq (%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movl %ebx, %eax
addq $144, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB7_18:
.cfi_def_cfa_offset 192
# hipSetDevice failure path.
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.8, %edi
movl $63, %esi
jmp .LBB7_15
.LBB7_19:
# Kernel launch failure: fprintf(stderr, fmt, hipGetErrorString(err)).
movq stderr(%rip), %r14
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.11, %esi
movq %r14, %rdi
movq %rax, %rdx
xorl %eax, %eax
callq fprintf
jmp .LBB7_17
.LBB7_20:
# hipDeviceSynchronize failure: report the numeric error code.
movq stderr(%rip), %rdi
movl $.L.str.12, %esi
movl %eax, %ebx
movl %eax, %edx
xorl %eax, %eax
callq fprintf
jmp .LBB7_17
.Lfunc_end7:
.size _Z16prodMatricesCudaPiPKiS1_j, .Lfunc_end7-_Z16prodMatricesCudaPiPKiS1_j
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# __hip_module_ctor — runs from .init_array.  Registers the embedded fat
# binary once (guarded by __hip_gpubin_handle), registers the device kernel
# _Z14productoKernelPiPKiS1_j under its mangled name, and queues
# __hip_module_dtor with atexit via a tail call.
# ---------------------------------------------------------------------------
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
# Register the fat binary only on the first invocation.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB8_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB8_2:
movq __hip_gpubin_handle(%rip), %rdi
# Zero the trailing optional arguments of __hipRegisterFunction.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14productoKernelPiPKiS1_j, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end8:
.size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# __hip_module_dtor — atexit handler: unregisters the fat binary if it was
# registered and clears __hip_gpubin_handle so the teardown is idempotent.
# ---------------------------------------------------------------------------
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB9_2
# %bb.1:
# push %rax only realigns the stack to 16 bytes for the call.
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB9_2:
retq
.Lfunc_end9:
.size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# Data and registration objects.
# _Z14productoKernelPiPKiS1_j is the host-side handle for the kernel: a
# single pointer to the device stub, used both by hipLaunchKernel and by
# __hipRegisterFunction above.
# ---------------------------------------------------------------------------
.type _Z14productoKernelPiPKiS1_j,@object # @_Z14productoKernelPiPKiS1_j
.section .rodata,"a",@progbits
.globl _Z14productoKernelPiPKiS1_j
.p2align 3, 0x0
_Z14productoKernelPiPKiS1_j:
.quad _Z29__device_stub__productoKernelPiPKiS1_j
.size _Z14productoKernelPiPKiS1_j, 8
# printf/fwrite message strings referenced by the functions above.
.type .L.str.2,@object # @.str.2
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.2:
.asciz "%d"
.size .L.str.2, 3
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "addWithCuda failed!"
.size .L.str.3, 20
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "hipDeviceReset failed!"
.size .L.str.7, 23
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"
.size .L.str.8, 64
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "hipMalloc failed!"
.size .L.str.9, 18
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "hipMemcpy failed!"
.size .L.str.10, 18
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "addKernel launch failed: %s\n"
.size .L.str.11, 29
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "hipDeviceSynchronize returned error code %d after launching addKernel!\n"
.size .L.str.12, 72
# Mangled kernel name handed to __hipRegisterFunction.
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z14productoKernelPiPKiS1_j"
.size .L__unnamed_1, 28
# Fat binary wrapper: magic 0x48495046 ("HIPF" reversed), version, pointer
# to the embedded device code blob.
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
# Once-guard for fat-binary registration (see __hip_module_ctor/dtor).
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
# Strings printed by main via puts (clang folded printf into puts).
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "\n\tMatriz A"
.size .Lstr, 11
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "\n\tMatriz B"
.size .Lstr.1, 11
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "\n\tResultado del producto:"
.size .Lstr.2, 26
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__productoKernelPiPKiS1_j
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14productoKernelPiPKiS1_j
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | typedef enum {DT_UNDEFINED = 0, DT_INT, DT_FLOAT, DT_STRING, DT_BOOLEAN} DataType;
// Binary operators of the expression language.  Note the on-the-wire
// encoding parsed by parseOperator: ">>" is greater-than and "<<" is
// less-than (the single-character forms are not used).
typedef enum {OP_UNDEFINED = 0, OP_EQUAL_TO, OP_GREATER_THAN, OP_GREATER_THAN_OR_EQUAL_TO, OP_LESS_THAN,
OP_LESS_THAN_OR_EQUAL_TO, OP_LOGICAL_AND, OP_LOGICAL_OR, OP_NOT_EQUAL_TO} Operator;
// Verdict of one decision-list node: DL_IGNORE means the node's condition
// was false, so evaluation moves on to the next node.
typedef enum {DL_ERROR = 0, DL_FALSE = 1, DL_TRUE = 2, DL_IGNORE = 3} DLNodeValue;
// Tagged value: `type` selects which of the member fields is meaningful.
typedef struct {
DataType type;
int intValue;
float floatValue;
char *stringValue;     // points into the expression buffer (parseStringConstant)
int booleanValue;      // 0 = false, nonzero = true
} Value;
// Upper bound on how far any single token scan may advance; guards against
// unterminated "{...}" tokens.
#define OFFSET_SAFETY_MAX 100
// All parseXXX functions return the number of characters consumed (0 on
// error) and deliver their result through the out-parameter.
__device__ int parseDecisionListNode(char *expression, DLNodeValue *value);
__device__ int parseExpression(char *expression, Value *value);
__device__ int parseBinaryExpression(char *expression, Value *value);
__device__ int parseVariableExpression(char *expression, Value *value);
__device__ int parseBooleanConstant(char *expression, Value *value);
__device__ int parseIntegerConstant(char *expression, Value *value);
__device__ int parseFloatConstant(char *expression, Value *value);
__device__ int parseStringConstant(char *expression, Value *value);
__device__ int parseOperator(char *expression, Operator *op);
__device__ void evaluateBinaryExpression(Value *operand1, Operator op, Value *operand2, Value *returnValue);
__device__ void evaluateIntegerComparison(int op1, Operator op, int op2, Value *value);
__device__ void evaluateFloatComparison(float op1, Operator op, float op2, Value *value);
__device__ void evaluateStringComparison(char *op1, Operator op, char *op2, Value *value);
__device__ void evaluateBooleanComparison(int op1, Operator op, int op2, Value *value);
__device__ int dstrlen(char *str);
__device__ int dstreql(char *str1, char *str2);
extern "C"
__global__ void processDecisionLists(int numExpressions, char **expressions, int *output)
{
    // One thread per decision-list expression string; threads beyond
    // numExpressions do nothing.
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Explicit cast documents the (previously implicit) unsigned comparison.
    if (idx < (unsigned int)numExpressions) {
        char *expression = expressions[idx];
        DLNodeValue dlNodeValue;
        // The consumed-character count is irrelevant here; only the node
        // verdict is written out (was stored in an unused local before).
        parseDecisionListNode(expression, &dlNodeValue);
        output[idx] = dlNodeValue;
    }
}
/////////////////////////////////////////////////////////////////////
// PARSING FUNCTIONS
//
// NB: All the parseXXX functions return a value that indicates how far the pointer
// should be advanced. The actual return value is in the parameter list.
// Parses and evaluates one decision-list node, writing the verdict to
// *dlNodeValue.  Returns the number of characters consumed (0 on error).
//
// Currently there are only two valid formats for a DL node:
//      <binary expression> <T|F>
//      <boolean constant> <T|F>
// In the latter case, the <boolean constant> must always be T since that represents
// the default node. It's redundant to have a condition that always evaluates to true,
// but we keep it anyway because the code to generate, store and evaluate DL's on the
// Java side is much nicer that way.
__device__ int parseDecisionListNode(char *expression, DLNodeValue *dlNodeValue) {
    int offset = 0;
    Value value;
    offset += parseExpression(expression, &value);
    // Check the return from the expression evaluation. If it's false, then we ignore this
    // DL node and move on to the next one (so return IGNORE); if true, then we return the
    // node's value.
    // A condition must evaluate to a boolean; anything else is an error.
    if (value.type != DT_BOOLEAN) {
        *dlNodeValue = DL_ERROR;
        return 0;
    }
    if (value.booleanValue == 0) {
        *dlNodeValue = DL_IGNORE;
    } else {
        // Condition held: the character after the expression is the node's
        // hard-coded verdict, 'T' or 'F'.
        char nodeValue = *(expression+offset);
        if (nodeValue == 'T')
            *dlNodeValue = DL_TRUE;
        else if (nodeValue == 'F')
            *dlNodeValue = DL_FALSE;
        else {
            *dlNodeValue = DL_ERROR;
            return 0;
        }
    }
    return offset;
}
// Dispatches on the two-character expression tag that prefixes every
// serialized expression ('E' = expression, 'C' = constant; the second
// character selects the concrete kind).  Writes the parsed/evaluated result
// into *value and returns the total characters consumed, or 0 with
// value->type == DT_UNDEFINED when the tag is unrecognized.
//
// NB: This is where you'd plug in the code to evaluate additional kinds of
// expressions if you wanted to expand this kernel to be more generic.
__device__ int parseExpression(char *expression, Value *value) {
    const char kind = expression[0];
    const char sub  = expression[1];
    int consumed = 2;                       // the tag itself
    if (kind == 'E') {
        if (sub == 'B')
            return consumed + parseBinaryExpression(expression + consumed, value);
        if (sub == 'V')
            return consumed + parseVariableExpression(expression + consumed, value);
    } else if (kind == 'C') {
        switch (sub) {
        case 'B': return consumed + parseBooleanConstant(expression + consumed, value);
        case 'I': return consumed + parseIntegerConstant(expression + consumed, value);
        case 'F': return consumed + parseFloatConstant(expression + consumed, value);
        case 'S': return consumed + parseStringConstant(expression + consumed, value);
        }
    }
    // Unknown tag: signal an error.
    value->type = DT_UNDEFINED;
    return 0;
}
// Parses "{<expr> <op> <expr>}" recursively, evaluates it, and stores the
// result in *value.  Returns characters consumed, or 0 on a missing brace
// (in which case value->type is set to DT_UNDEFINED only for the closing
// brace; a missing opening brace leaves *value untouched).
__device__ int parseBinaryExpression(char *expression, Value *value) {
    int offset = 0;
    // Skip over opening {
    if (*expression != '{')
        return 0;
    offset++;
    Value operand1;
    Operator op;
    Value operand2;
    // Recursively parse <lhs> <operator> <rhs>.
    offset += parseExpression(expression+offset, &operand1);
    offset += parseOperator(expression+offset, &op);
    offset += parseExpression(expression+offset, &operand2);
    // Evaluate the binary expression
    evaluateBinaryExpression(&operand1, op, &operand2, value);
    // Skip over closing }
    if (*(expression+offset) != '}') {
        value->type = DT_UNDEFINED;
        return 0;
    }
    offset++;
    return offset;
}
// Parses "{name}" for a variable reference.  There is no symbol table yet,
// so the variable cannot be resolved; we now report DT_UNDEFINED instead of
// leaving *value uninitialized (callers such as parseDecisionListNode read
// value->type, which previously was garbage on this path).  Returns the
// characters consumed, or 0 on a malformed/unterminated token.
__device__ int parseVariableExpression(char *expression, Value *value) {
    value->type = DT_UNDEFINED;  // fixed: *value was never written before
    int offset = 0;
    // Skip over opening {
    if (*expression != '{')
        return 0;
    offset++;
    // Scan to the closing '}', bounded by the safety limit.
    while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX)
        offset++;
    if (offset == OFFSET_SAFETY_MAX)
        return 0;
    // NUL-terminate the name in place (mutates the expression buffer).
    *(expression+offset) = '\0';
    offset++;
    // TODO: Look up variable in symbol table.
    // Of course, to do that we need to *have* a symbol table, so that's first on the list.
    return offset;
}
// Parses "{T}" or "{F}" into a DT_BOOLEAN value.  Returns characters
// consumed, or 0 on error with value->type == DT_UNDEFINED.  (Previously a
// missing opening brace left *value untouched and a missing closing brace
// returned 0 while leaving value->type == DT_BOOLEAN, so callers could
// mistake an error for a valid boolean.)
__device__ int parseBooleanConstant(char *expression, Value *value) {
    value->type = DT_UNDEFINED;  // error until fully parsed
    int offset = 0;
    // Skip over opening {
    if (*expression != '{')
        return 0;
    offset++;
    if (*(expression+offset) == 'F') {
        value->booleanValue = 0;
    } else if (*(expression+offset) == 'T') {
        value->booleanValue = 1;
    } else { // error
        return 0;
    }
    offset++;
    // Skip over closing }
    if (*(expression+offset) != '}')
        return 0;
    offset++;
    value->type = DT_BOOLEAN;    // only now is the token known to be complete
    return offset;
}
// Parses "{digits}" into a DT_INT value.  Only non-negative decimal
// integers are supported (no sign handling).  Returns characters consumed,
// or 0 on error with value->type == DT_UNDEFINED (previously the error
// paths returned 0 without setting the type, leaking an uninitialized
// value->type to callers).
__device__ int parseIntegerConstant(char *expression, Value *value) {
    value->type = DT_UNDEFINED;  // error until fully parsed
    int offset = 0;
    // Skip over opening {
    if (*expression != '{')
        return 0;
    offset++;
    // Accumulate decimal digits until the closing brace or the safety limit.
    value->intValue = 0;
    while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX) {
        value->intValue = value->intValue * 10 + (*(expression+offset) - '0');
        offset++;
    }
    if (offset == OFFSET_SAFETY_MAX)
        return 0;
    value->type = DT_INT;
    offset++;
    return offset;
}
// Parses "{0.digits}" into a DT_FLOAT value.  Only fractions of the form
// 0.ddd are supported (mantissa must start with "0.").  Returns characters
// consumed, or 0 on error with value->type == DT_UNDEFINED (previously the
// error paths left value->type uninitialized).
__device__ int parseFloatConstant(char *expression, Value *value) {
    value->type = DT_UNDEFINED;  // error until fully parsed
    int offset = 0;
    // Skip over opening {
    if (expression[0] != '{')
        return 0;
    offset++;
    // Require the literal prefix "0.".
    if (*(expression+offset) != '0')
        return 0;
    offset++;
    if (*(expression+offset) != '.')
        return 0;
    offset++;
    // Accumulate fractional digits: each successive digit contributes
    // digit/10, digit/100, ...
    value->floatValue = 0;
    int divisor = 10;
    while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX) {
        value->floatValue = value->floatValue + ((float)(*(expression+offset) - '0'))/divisor;
        divisor = divisor * 10;
        offset++;
    }
    if (offset == OFFSET_SAFETY_MAX)
        return 0;
    value->type = DT_FLOAT;
    offset++;
    return offset;
}
// Parses "{text}" into a DT_STRING value whose stringValue points into the
// expression buffer (the closing '}' is overwritten with '\0' in place).
// Returns characters consumed, or 0 on error with value->type ==
// DT_UNDEFINED (previously the error paths left value->type uninitialized).
__device__ int parseStringConstant(char *expression, Value *value) {
    value->type = DT_UNDEFINED;  // error until fully parsed
    int offset = 0;
    // Skip over opening {
    if (*expression != '{')
        return 0;
    offset++;
    char *token = expression+offset;
    // Scan to the closing '}', bounded by the safety limit.
    while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX)
        offset++;
    if (offset == OFFSET_SAFETY_MAX)
        return 0;
    *(expression+offset) = '\0';
    offset++;
    value->type = DT_STRING;
    value->stringValue = token;
    return offset;
}
// Decodes a two-character operator token into *op and returns 2, or
// returns 0 without touching *op when the token is not recognized.
// Note the language's encoding: ">>" is greater-than and "<<" is
// less-than; ">=" / "<=" / "==" / "!=" / "&&" / "||" are as usual.
__device__ int parseOperator(char *expression, Operator *op) {
    const char first = expression[0];
    const char second = expression[1];
    Operator decoded = OP_UNDEFINED;
    switch (first) {
    case '=':
        if (second == '=') decoded = OP_EQUAL_TO;
        break;
    case '>':
        if (second == '>') decoded = OP_GREATER_THAN;
        else if (second == '=') decoded = OP_GREATER_THAN_OR_EQUAL_TO;
        break;
    case '<':
        if (second == '<') decoded = OP_LESS_THAN;
        else if (second == '=') decoded = OP_LESS_THAN_OR_EQUAL_TO;
        break;
    case '&':
        if (second == '&') decoded = OP_LOGICAL_AND;
        break;
    case '|':
        if (second == '|') decoded = OP_LOGICAL_OR;
        break;
    case '!':
        if (second == '=') decoded = OP_NOT_EQUAL_TO;
        break;
    default:
        break;
    }
    if (decoded == OP_UNDEFINED)
        return 0;     // error: *op deliberately left unchanged
    *op = decoded;
    return 2;
}
/////////////////////////////////////////////////////////////////////
// EVALUATION FUNCTIONS
// Evaluates `operand1 op operand2` into *value, dispatching on the operand
// type.  Mixed-type comparisons and DT_UNDEFINED operands produce an error
// (value->type stays DT_UNDEFINED).
__device__ void evaluateBinaryExpression(Value *operand1, Operator op, Value *operand2, Value *value) {
    // Indicate an error by not setting the type on the return value
    value->type = DT_UNDEFINED;
    // For now only allowing comparison of the same types
    if (operand1->type != operand2->type)
        return;
    switch (operand1->type) {
    case DT_INT:
        evaluateIntegerComparison(operand1->intValue, op, operand2->intValue, value);
        break;
    case DT_FLOAT:
        evaluateFloatComparison(operand1->floatValue, op, operand2->floatValue, value);
        break;
    case DT_STRING:
        evaluateStringComparison(operand1->stringValue, op, operand2->stringValue, value);
        break;
    case DT_BOOLEAN:
        evaluateBooleanComparison(operand1->booleanValue, op, operand2->booleanValue, value);
        break;
    default:
    case DT_UNDEFINED:
        // do nothing
        break;
    }
}
// Applies `op1 op op2` for ints and stores a DT_BOOLEAN result in *value.
// Every operator is supported; an unrecognized operator yields false.
__device__ void evaluateIntegerComparison(int op1, Operator op, int op2, Value *value) {
    int result = 0;  // unrecognized operators compare as false
    switch (op) {
    case OP_EQUAL_TO:
        result = (op1 == op2);
        break;
    case OP_GREATER_THAN:
        result = (op1 > op2);
        break;
    case OP_GREATER_THAN_OR_EQUAL_TO:
        result = (op1 >= op2);
        break;
    case OP_LESS_THAN:
        result = (op1 < op2);
        break;
    case OP_LESS_THAN_OR_EQUAL_TO:
        result = (op1 <= op2);
        break;
    case OP_LOGICAL_AND:
        result = (op1 && op2);
        break;
    case OP_LOGICAL_OR:
        result = (op1 || op2);
        break;
    case OP_NOT_EQUAL_TO:
        result = (op1 != op2);
        break;
    default:
    case OP_UNDEFINED:
        break;
    }
    value->type = DT_BOOLEAN;
    value->booleanValue = result;
}
// Applies `op1 op op2` for floats and stores a DT_BOOLEAN result in *value.
// An unrecognized operator now yields false, matching
// evaluateIntegerComparison.  (Previously the OP_UNDEFINED path returned
// early after setting type = DT_BOOLEAN but without ever writing
// booleanValue, so callers read an uninitialized flag.)
// NOTE(review): exact float == / != comparison is inherited from the
// original semantics.
__device__ void evaluateFloatComparison(float op1, Operator op, float op2, Value *value) {
    value->type = DT_BOOLEAN;
    int bv = 0; // assume comparison is false
    switch (op) {
    case OP_EQUAL_TO:
        if (op1 == op2) bv = 1;
        break;
    case OP_GREATER_THAN:
        if (op1 > op2) bv = 1;
        break;
    case OP_GREATER_THAN_OR_EQUAL_TO:
        if (op1 >= op2) bv = 1;
        break;
    case OP_LESS_THAN:
        if (op1 < op2) bv = 1;
        break;
    case OP_LESS_THAN_OR_EQUAL_TO:
        if (op1 <= op2) bv = 1;
        break;
    case OP_LOGICAL_AND:
        bv = op1 && op2;
        break;
    case OP_LOGICAL_OR:
        bv = op1 || op2;
        break;
    case OP_NOT_EQUAL_TO:
        if (op1 != op2) bv = 1;
        break;
    default:
    case OP_UNDEFINED:
        break;  // fixed: was `return;`, leaving booleanValue uninitialized
    }
    value->booleanValue = bv;
}
// Applies `op1 op op2` for strings and stores a DT_BOOLEAN result in
// *value.  Only equality/inequality are supported; every other operator
// yields false.
//
// Because time is short, we'll have to skimp on the string comparisons.
// The greater than and less than operations require a lexical comparison,
// and we don't have access to the standard C library (and thus strcmp()).
// I'm not going to write my own strcmp() function, so equality is the
// only operation we're going to support for now.
__device__ void evaluateStringComparison(char *op1, Operator op, char *op2, Value *value) {
    value->type = DT_BOOLEAN;
    int bv = 0;
    switch (op) {
    case OP_EQUAL_TO:
        if (dstreql(op1, op2) == 1) bv = 1;
        break;
    case OP_NOT_EQUAL_TO:
        if (dstreql(op1, op2) == 0) bv = 1;
        break;
    default:
    // Ordering and logical operators are intentionally unsupported.
    case OP_GREATER_THAN:
    case OP_GREATER_THAN_OR_EQUAL_TO:
    case OP_LESS_THAN:
    case OP_LESS_THAN_OR_EQUAL_TO:
    case OP_LOGICAL_AND:
    case OP_LOGICAL_OR:
    case OP_UNDEFINED:
        break;
    }
    value->booleanValue = bv;
}
// Applies `op1 op op2` for booleans (stored as ints) and stores a
// DT_BOOLEAN result in *value.  Supports ==, !=, && and ||; the ordering
// operators are meaningless for booleans and yield false.
__device__ void evaluateBooleanComparison(int op1, Operator op, int op2, Value *value) {
    value->type = DT_BOOLEAN;
    int result;
    switch (op) {
    case OP_EQUAL_TO:
        result = (op1 == op2);
        break;
    case OP_LOGICAL_AND:
        result = (op1 && op2);
        break;
    case OP_LOGICAL_OR:
        result = (op1 || op2);
        break;
    case OP_NOT_EQUAL_TO:
        result = (op1 != op2);
        break;
    default:
    case OP_GREATER_THAN:
    case OP_GREATER_THAN_OR_EQUAL_TO:
    case OP_LESS_THAN:
    case OP_LESS_THAN_OR_EQUAL_TO:
    case OP_UNDEFINED:
        result = 0;
        break;
    }
    value->booleanValue = result;
}
/////////////////////////////////////////////////////////////////////
// STRING FUNCTIONS
// Device-side strlen replacement: counts characters before the NUL
// terminator (the CUDA device runtime has no C standard library).
__device__ int dstrlen(char *str) {
    int count = 0;
    for (char *p = str; *p != '\0'; ++p)
        ++count;
    return count;
}
// Device-side string equality: returns 1 when str1 and str2 are identical
// up to their NUL terminators, 0 otherwise.
__device__ int dstreql(char *str1, char *str2) {
    // Advance both pointers while characters match and neither string ended.
    while (*str1 == *str2 && *str1 != '\0' && *str2 != '\0') {
        str1++;
        str2++;
    }
    // Equal only if both strings were exhausted at the same position.
    if (*str1 == '\0' && *str2 == '\0')
        return 1;
    return 0;
} | .file "tmpxft_0019d4a9_00000000-6_decisionListKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2045:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2045:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z21parseDecisionListNodePcP11DLNodeValue
.type _Z21parseDecisionListNodePcP11DLNodeValue, @function
_Z21parseDecisionListNodePcP11DLNodeValue:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z21parseDecisionListNodePcP11DLNodeValue, .-_Z21parseDecisionListNodePcP11DLNodeValue
.globl _Z15parseExpressionPcP5Value
.type _Z15parseExpressionPcP5Value, @function
_Z15parseExpressionPcP5Value:
.LFB2028:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2028:
.size _Z15parseExpressionPcP5Value, .-_Z15parseExpressionPcP5Value
.globl _Z21parseBinaryExpressionPcP5Value
.type _Z21parseBinaryExpressionPcP5Value, @function
_Z21parseBinaryExpressionPcP5Value:
.LFB2029:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2029:
.size _Z21parseBinaryExpressionPcP5Value, .-_Z21parseBinaryExpressionPcP5Value
.globl _Z23parseVariableExpressionPcP5Value
.type _Z23parseVariableExpressionPcP5Value, @function
_Z23parseVariableExpressionPcP5Value:
.LFB2030:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2030:
.size _Z23parseVariableExpressionPcP5Value, .-_Z23parseVariableExpressionPcP5Value
.globl _Z20parseBooleanConstantPcP5Value
.type _Z20parseBooleanConstantPcP5Value, @function
_Z20parseBooleanConstantPcP5Value:
.LFB2031:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2031:
.size _Z20parseBooleanConstantPcP5Value, .-_Z20parseBooleanConstantPcP5Value
.globl _Z20parseIntegerConstantPcP5Value
.type _Z20parseIntegerConstantPcP5Value, @function
_Z20parseIntegerConstantPcP5Value:
.LFB2032:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2032:
.size _Z20parseIntegerConstantPcP5Value, .-_Z20parseIntegerConstantPcP5Value
.globl _Z18parseFloatConstantPcP5Value
.type _Z18parseFloatConstantPcP5Value, @function
_Z18parseFloatConstantPcP5Value:
.LFB2033:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2033:
.size _Z18parseFloatConstantPcP5Value, .-_Z18parseFloatConstantPcP5Value
.globl _Z19parseStringConstantPcP5Value
.type _Z19parseStringConstantPcP5Value, @function
_Z19parseStringConstantPcP5Value:
.LFB2034:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2034:
.size _Z19parseStringConstantPcP5Value, .-_Z19parseStringConstantPcP5Value
.globl _Z13parseOperatorPcP8Operator
.type _Z13parseOperatorPcP8Operator, @function
_Z13parseOperatorPcP8Operator:
.LFB2035:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2035:
.size _Z13parseOperatorPcP8Operator, .-_Z13parseOperatorPcP8Operator
.globl _Z24evaluateBinaryExpressionP5Value8OperatorS0_S0_
.type _Z24evaluateBinaryExpressionP5Value8OperatorS0_S0_, @function
_Z24evaluateBinaryExpressionP5Value8OperatorS0_S0_:
.LFB2036:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2036:
.size _Z24evaluateBinaryExpressionP5Value8OperatorS0_S0_, .-_Z24evaluateBinaryExpressionP5Value8OperatorS0_S0_
.globl _Z25evaluateIntegerComparisoni8OperatoriP5Value
.type _Z25evaluateIntegerComparisoni8OperatoriP5Value, @function
_Z25evaluateIntegerComparisoni8OperatoriP5Value:
.LFB2037:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2037:
.size _Z25evaluateIntegerComparisoni8OperatoriP5Value, .-_Z25evaluateIntegerComparisoni8OperatoriP5Value
.globl _Z23evaluateFloatComparisonf8OperatorfP5Value
.type _Z23evaluateFloatComparisonf8OperatorfP5Value, @function
_Z23evaluateFloatComparisonf8OperatorfP5Value:
.LFB2038:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2038:
.size _Z23evaluateFloatComparisonf8OperatorfP5Value, .-_Z23evaluateFloatComparisonf8OperatorfP5Value
.globl _Z24evaluateStringComparisonPc8OperatorS_P5Value
.type _Z24evaluateStringComparisonPc8OperatorS_P5Value, @function
_Z24evaluateStringComparisonPc8OperatorS_P5Value:
.LFB2039:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2039:
.size _Z24evaluateStringComparisonPc8OperatorS_P5Value, .-_Z24evaluateStringComparisonPc8OperatorS_P5Value
.globl _Z25evaluateBooleanComparisoni8OperatoriP5Value
.type _Z25evaluateBooleanComparisoni8OperatoriP5Value, @function
_Z25evaluateBooleanComparisoni8OperatoriP5Value:
.LFB2040:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2040:
.size _Z25evaluateBooleanComparisoni8OperatoriP5Value, .-_Z25evaluateBooleanComparisoni8OperatoriP5Value
.globl _Z7dstrlenPc
.type _Z7dstrlenPc, @function
_Z7dstrlenPc:
.LFB2041:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2041:
.size _Z7dstrlenPc, .-_Z7dstrlenPc
.globl _Z7dstreqlPcS_
.type _Z7dstreqlPcS_, @function
_Z7dstreqlPcS_:
.LFB2042:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2042:
.size _Z7dstreqlPcS_, .-_Z7dstreqlPcS_
.globl _Z44__device_stub__Z20processDecisionListsiPPcPiiPPcPi
.type _Z44__device_stub__Z20processDecisionListsiPPcPiiPPcPi, @function
_Z44__device_stub__Z20processDecisionListsiPPcPiiPPcPi:
.LFB2067:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L39
.L35:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq processDecisionLists(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L35
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2067:
.size _Z44__device_stub__Z20processDecisionListsiPPcPiiPPcPi, .-_Z44__device_stub__Z20processDecisionListsiPPcPiiPPcPi
.globl processDecisionLists
.type processDecisionLists, @function
processDecisionLists:
.LFB2068:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z44__device_stub__Z20processDecisionListsiPPcPiiPPcPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2068:
.size processDecisionLists, .-processDecisionLists
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "processDecisionLists"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2070:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq processDecisionLists(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2070:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | typedef enum {DT_UNDEFINED = 0, DT_INT, DT_FLOAT, DT_STRING, DT_BOOLEAN} DataType;
typedef enum {OP_UNDEFINED = 0, OP_EQUAL_TO, OP_GREATER_THAN, OP_GREATER_THAN_OR_EQUAL_TO, OP_LESS_THAN,
OP_LESS_THAN_OR_EQUAL_TO, OP_LOGICAL_AND, OP_LOGICAL_OR, OP_NOT_EQUAL_TO} Operator;
typedef enum {DL_ERROR = 0, DL_FALSE = 1, DL_TRUE = 2, DL_IGNORE = 3} DLNodeValue;
typedef struct {
DataType type;
int intValue;
float floatValue;
char *stringValue;
int booleanValue;
} Value;
#define OFFSET_SAFETY_MAX 100
__device__ int parseDecisionListNode(char *expression, DLNodeValue *value);
__device__ int parseExpression(char *expression, Value *value);
__device__ int parseBinaryExpression(char *expression, Value *value);
__device__ int parseVariableExpression(char *expression, Value *value);
__device__ int parseBooleanConstant(char *expression, Value *value);
__device__ int parseIntegerConstant(char *expression, Value *value);
__device__ int parseFloatConstant(char *expression, Value *value);
__device__ int parseStringConstant(char *expression, Value *value);
__device__ int parseOperator(char *expression, Operator *op);
__device__ void evaluateBinaryExpression(Value *operand1, Operator op, Value *operand2, Value *returnValue);
__device__ void evaluateIntegerComparison(int op1, Operator op, int op2, Value *value);
__device__ void evaluateFloatComparison(float op1, Operator op, float op2, Value *value);
__device__ void evaluateStringComparison(char *op1, Operator op, char *op2, Value *value);
__device__ void evaluateBooleanComparison(int op1, Operator op, int op2, Value *value);
__device__ int dstrlen(char *str);
__device__ int dstreql(char *str1, char *str2);
extern "C"
__global__ void processDecisionLists(int numExpressions, char **expressions, int *output)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numExpressions) {
char *expression = expressions[idx];
DLNodeValue dlNodeValue;
int offset = parseDecisionListNode(expression, &dlNodeValue);
output[idx] = dlNodeValue;
}
}
/////////////////////////////////////////////////////////////////////
// PARSING FUNCTIONS
//
// NB: All the parseXXX functions return a value that indicates how far the pointer
// should be advanced. The actual return value is in the parameter list.
__device__ int parseDecisionListNode(char *expression, DLNodeValue *dlNodeValue) {
// Currently there are only two valid formats for a DL node:
// <binary expression> <T|F>
// <boolean constant> <T|F>
// In the latter case, the <boolean constant> must always be T since that represents
// the default node. It's redundant to have a condition that always evaluates to true,
// but we keep it anyway because the code to generate, store and evaluate DL's on the
// Java side is much nicer that way.
int offset = 0;
Value value;
offset += parseExpression(expression, &value);
// Check the return from the expression evaluation. If it's false, then we ignore this
// DL node and move on to the next one (so return IGNORE); if true, then we return the
// node's value.
if (value.type != DT_BOOLEAN) {
*dlNodeValue = DL_ERROR;
return 0;
}
if (value.booleanValue == 0) {
*dlNodeValue = DL_IGNORE;
} else {
char nodeValue = *(expression+offset);
if (nodeValue == 'T')
*dlNodeValue = DL_TRUE;
else if (nodeValue == 'F')
*dlNodeValue = DL_FALSE;
else {
*dlNodeValue = DL_ERROR;
return 0;
}
}
return offset;
}
__device__ int parseExpression(char *expression, Value *value) {
int offset = 0;
char c1 = expression[0];
char c2 = expression[1];
offset += 2;
// NB: This is where you'd plug in the code to evaluate additional kinds of expressions
// if you wanted to expand this kernel to be more generic.
if (c1 == 'E' && c2 == 'B')
offset += parseBinaryExpression(expression+offset, value);
else if (c1 == 'E' && c2 == 'V')
offset += parseVariableExpression(expression+offset, value);
else if (c1 == 'C' && c2 == 'B')
offset += parseBooleanConstant(expression+offset, value);
else if (c1 == 'C' && c2 == 'I')
offset += parseIntegerConstant(expression+offset, value);
else if (c1 == 'C' && c2 == 'F')
offset += parseFloatConstant(expression+offset, value);
else if (c1 == 'C' && c2 == 'S')
offset += parseStringConstant(expression+offset, value);
else { // error
value->type = DT_UNDEFINED;
return 0;
}
return offset;
}
__device__ int parseBinaryExpression(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
Value operand1;
Operator op;
Value operand2;
offset += parseExpression(expression+offset, &operand1);
offset += parseOperator(expression+offset, &op);
offset += parseExpression(expression+offset, &operand2);
// Evaluate the binary expression
evaluateBinaryExpression(&operand1, op, &operand2, value);
// Skip over closing }
if (*(expression+offset) != '}') {
value->type = DT_UNDEFINED;
return 0;
}
offset++;
return offset;
}
__device__ int parseVariableExpression(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
char *token = expression+offset;
while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX)
offset++;
if (offset == OFFSET_SAFETY_MAX)
return 0;
*(expression+offset) = '\0';
offset++;
// TODO: Look up variable in symbol table.
// Of course, to do that we need to *have* a symbol table, so that's first on the list.
return offset;
}
__device__ int parseBooleanConstant(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
if (*(expression+offset) == 'F') {
value->booleanValue = 0;
value->type = DT_BOOLEAN;
} else if (*(expression+offset) == 'T') {
value->booleanValue = 1;
value->type = DT_BOOLEAN;
} else { // error
value->type = DT_UNDEFINED;
return 0;
}
offset++;
// Skip over closing }
if (*(expression+offset) != '}')
return 0;
offset++;
return offset;
}
__device__ int parseIntegerConstant(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
value->intValue = 0;
while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX) {
value->intValue = value->intValue * 10 + (*(expression+offset) - '0');
offset++;
}
if (offset == OFFSET_SAFETY_MAX)
return 0;
value->type = DT_INT;
offset++;
return offset;
}
__device__ int parseFloatConstant(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (expression[0] != '{')
return 0;
offset++;
if (*(expression+offset) != '0')
return 0;
offset++;
if (*(expression+offset) != '.')
return 0;
offset++;
value->floatValue = 0;
int divisor = 10;
while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX) {
value->floatValue = value->floatValue + ((float)(*(expression+offset) - '0'))/divisor;
divisor = divisor * 10;
offset++;
}
if (offset == OFFSET_SAFETY_MAX)
return 0;
value->type = DT_FLOAT;
offset++;
return offset;
}
__device__ int parseStringConstant(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
char *token = expression+offset;
while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX)
offset++;
if (offset == OFFSET_SAFETY_MAX)
return 0;
*(expression+offset) = '\0';
offset++;
value->type = DT_STRING;
value->stringValue = token;
return offset;
}
__device__ int parseOperator(char *expression, Operator *op) {
char c1 = expression[0];
char c2 = expression[1];
if (c1 == '=' && c2 == '=')
*op = OP_EQUAL_TO;
else if (c1 == '>' && c2 == '>')
*op = OP_GREATER_THAN;
else if (c1 == '>' && c2 == '=')
*op = OP_GREATER_THAN_OR_EQUAL_TO;
else if (c1 == '<' && c2 == '<')
*op = OP_LESS_THAN;
else if (c1 == '<' && c2 == '=')
*op = OP_LESS_THAN_OR_EQUAL_TO;
else if (c1 == '&' && c2 == '&')
*op = OP_LOGICAL_AND;
else if (c1 == '|' && c2 == '|')
*op = OP_LOGICAL_OR;
else if (c1 == '!' && c2 == '=')
*op = OP_NOT_EQUAL_TO;
else // error
return 0;
return 2;
}
/////////////////////////////////////////////////////////////////////
// EVALUATION FUNCTIONS
__device__ void evaluateBinaryExpression(Value *operand1, Operator op, Value *operand2, Value *value) {
// Indicate an error by not setting the type on the return value
value->type = DT_UNDEFINED;
// For now only allowing comparison of the same types
if (operand1->type != operand2->type)
return;
switch (operand1->type) {
case DT_INT:
evaluateIntegerComparison(operand1->intValue, op, operand2->intValue, value);
break;
case DT_FLOAT:
evaluateFloatComparison(operand1->floatValue, op, operand2->floatValue, value);
break;
case DT_STRING:
evaluateStringComparison(operand1->stringValue, op, operand2->stringValue, value);
break;
case DT_BOOLEAN:
evaluateBooleanComparison(operand1->booleanValue, op, operand2->booleanValue, value);
break;
default:
case DT_UNDEFINED:
// do nothing
break;
}
}
__device__ void evaluateIntegerComparison(int op1, Operator op, int op2, Value *value) {
value->type = DT_BOOLEAN;
int bv = 0; // assume comparison is false
switch (op) {
case OP_EQUAL_TO:
if (op1 == op2) bv = 1;
break;
case OP_GREATER_THAN:
if (op1 > op2) bv = 1;
break;
case OP_GREATER_THAN_OR_EQUAL_TO:
if (op1 >= op2) bv = 1;
break;
case OP_LESS_THAN:
if (op1 < op2) bv = 1;
break;
case OP_LESS_THAN_OR_EQUAL_TO:
if (op1 <= op2) bv = 1;
break;
case OP_LOGICAL_AND:
bv = op1 && op2;
break;
case OP_LOGICAL_OR:
bv = op1 || op2;
break;
case OP_NOT_EQUAL_TO:
if (op1 != op2) bv = 1;
break;
default:
case OP_UNDEFINED:
break;
}
value->booleanValue = bv;
}
__device__ void evaluateFloatComparison(float op1, Operator op, float op2, Value *value) {
value->type = DT_BOOLEAN;
int bv = 0; // assume comparison is false
switch (op) {
case OP_EQUAL_TO:
if (op1 == op2) bv = 1;
break;
case OP_GREATER_THAN:
if (op1 > op2) bv = 1;
break;
case OP_GREATER_THAN_OR_EQUAL_TO:
if (op1 >= op2) bv = 1;
break;
case OP_LESS_THAN:
if (op1 < op2) bv = 1;
break;
case OP_LESS_THAN_OR_EQUAL_TO:
if (op1 <= op2) bv = 1;
break;
case OP_LOGICAL_AND:
bv = op1 && op2;
break;
case OP_LOGICAL_OR:
bv = op1 || op2;
break;
case OP_NOT_EQUAL_TO:
if (op1 != op2) bv = 1;
break;
default:
case OP_UNDEFINED:
return;
}
value->booleanValue = bv;
}
__device__ void evaluateStringComparison(char *op1, Operator op, char *op2, Value *value) {
// Because time is short, we'll have to skimp on the string comparisons
// The greater than and less than operations require a lexical comparison,
// and we don't have access to the standard C library (and thus strcmp()).
// I'm not not going to write my own strcmp() function, so equality is the
// only operation we're going to support for now.
value->type = DT_BOOLEAN;
int bv = 0;
switch (op) {
case OP_EQUAL_TO:
if (dstreql(op1, op2) == 1) bv = 1;
break;
case OP_NOT_EQUAL_TO:
if (dstreql(op1, op2) == 0) bv = 1;
break;
default:
case OP_GREATER_THAN:
case OP_GREATER_THAN_OR_EQUAL_TO:
case OP_LESS_THAN:
case OP_LESS_THAN_OR_EQUAL_TO:
case OP_LOGICAL_AND:
case OP_LOGICAL_OR:
case OP_UNDEFINED:
break;
}
value->booleanValue = bv;
}
__device__ void evaluateBooleanComparison(int op1, Operator op, int op2, Value *value) {
value->type = DT_BOOLEAN;
int bv = 0;
switch (op) {
case OP_EQUAL_TO:
if (op1 == op2) bv = 1;
break;
case OP_LOGICAL_AND:
bv = op1 && op2;
break;
case OP_LOGICAL_OR:
bv = op1 || op2;
break;
case OP_NOT_EQUAL_TO:
if (op1 != op2) bv = 1;
break;
default:
case OP_GREATER_THAN:
case OP_GREATER_THAN_OR_EQUAL_TO:
case OP_LESS_THAN:
case OP_LESS_THAN_OR_EQUAL_TO:
case OP_UNDEFINED:
break;
}
value->booleanValue = bv;
}
/////////////////////////////////////////////////////////////////////
// STRING FUNCTIONS
__device__ int dstrlen(char *str) {
int len = 0;
while (*str != '\0') {
str++;
len++;
}
return len;
}
__device__ int dstreql(char *str1, char *str2) {
while (*str1 == *str2 && *str1 != '\0' && *str2 != '\0') {
str1++;
str2++;
}
if (*str1 == '\0' && *str2 == '\0')
return 1;
return 0;
} | #include <hip/hip_runtime.h>
typedef enum {DT_UNDEFINED = 0, DT_INT, DT_FLOAT, DT_STRING, DT_BOOLEAN} DataType;
typedef enum {OP_UNDEFINED = 0, OP_EQUAL_TO, OP_GREATER_THAN, OP_GREATER_THAN_OR_EQUAL_TO, OP_LESS_THAN,
OP_LESS_THAN_OR_EQUAL_TO, OP_LOGICAL_AND, OP_LOGICAL_OR, OP_NOT_EQUAL_TO} Operator;
typedef enum {DL_ERROR = 0, DL_FALSE = 1, DL_TRUE = 2, DL_IGNORE = 3} DLNodeValue;
typedef struct {
DataType type;
int intValue;
float floatValue;
char *stringValue;
int booleanValue;
} Value;
#define OFFSET_SAFETY_MAX 100
__device__ int parseDecisionListNode(char *expression, DLNodeValue *value);
__device__ int parseExpression(char *expression, Value *value);
__device__ int parseBinaryExpression(char *expression, Value *value);
__device__ int parseVariableExpression(char *expression, Value *value);
__device__ int parseBooleanConstant(char *expression, Value *value);
__device__ int parseIntegerConstant(char *expression, Value *value);
__device__ int parseFloatConstant(char *expression, Value *value);
__device__ int parseStringConstant(char *expression, Value *value);
__device__ int parseOperator(char *expression, Operator *op);
__device__ void evaluateBinaryExpression(Value *operand1, Operator op, Value *operand2, Value *returnValue);
__device__ void evaluateIntegerComparison(int op1, Operator op, int op2, Value *value);
__device__ void evaluateFloatComparison(float op1, Operator op, float op2, Value *value);
__device__ void evaluateStringComparison(char *op1, Operator op, char *op2, Value *value);
__device__ void evaluateBooleanComparison(int op1, Operator op, int op2, Value *value);
__device__ int dstrlen(char *str);
__device__ int dstreql(char *str1, char *str2);
extern "C"
__global__ void processDecisionLists(int numExpressions, char **expressions, int *output)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < numExpressions) {
char *expression = expressions[idx];
DLNodeValue dlNodeValue;
int offset = parseDecisionListNode(expression, &dlNodeValue);
output[idx] = dlNodeValue;
}
}
/////////////////////////////////////////////////////////////////////
// PARSING FUNCTIONS
//
// NB: All the parseXXX functions return a value that indicates how far the pointer
// should be advanced. The actual return value is in the parameter list.
__device__ int parseDecisionListNode(char *expression, DLNodeValue *dlNodeValue) {
// Currently there are only two valid formats for a DL node:
// <binary expression> <T|F>
// <boolean constant> <T|F>
// In the latter case, the <boolean constant> must always be T since that represents
// the default node. It's redundant to have a condition that always evaluates to true,
// but we keep it anyway because the code to generate, store and evaluate DL's on the
// Java side is much nicer that way.
int offset = 0;
Value value;
offset += parseExpression(expression, &value);
// Check the return from the expression evaluation. If it's false, then we ignore this
// DL node and move on to the next one (so return IGNORE); if true, then we return the
// node's value.
if (value.type != DT_BOOLEAN) {
*dlNodeValue = DL_ERROR;
return 0;
}
if (value.booleanValue == 0) {
*dlNodeValue = DL_IGNORE;
} else {
char nodeValue = *(expression+offset);
if (nodeValue == 'T')
*dlNodeValue = DL_TRUE;
else if (nodeValue == 'F')
*dlNodeValue = DL_FALSE;
else {
*dlNodeValue = DL_ERROR;
return 0;
}
}
return offset;
}
__device__ int parseExpression(char *expression, Value *value) {
int offset = 0;
char c1 = expression[0];
char c2 = expression[1];
offset += 2;
// NB: This is where you'd plug in the code to evaluate additional kinds of expressions
// if you wanted to expand this kernel to be more generic.
if (c1 == 'E' && c2 == 'B')
offset += parseBinaryExpression(expression+offset, value);
else if (c1 == 'E' && c2 == 'V')
offset += parseVariableExpression(expression+offset, value);
else if (c1 == 'C' && c2 == 'B')
offset += parseBooleanConstant(expression+offset, value);
else if (c1 == 'C' && c2 == 'I')
offset += parseIntegerConstant(expression+offset, value);
else if (c1 == 'C' && c2 == 'F')
offset += parseFloatConstant(expression+offset, value);
else if (c1 == 'C' && c2 == 'S')
offset += parseStringConstant(expression+offset, value);
else { // error
value->type = DT_UNDEFINED;
return 0;
}
return offset;
}
__device__ int parseBinaryExpression(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
Value operand1;
Operator op;
Value operand2;
offset += parseExpression(expression+offset, &operand1);
offset += parseOperator(expression+offset, &op);
offset += parseExpression(expression+offset, &operand2);
// Evaluate the binary expression
evaluateBinaryExpression(&operand1, op, &operand2, value);
// Skip over closing }
if (*(expression+offset) != '}') {
value->type = DT_UNDEFINED;
return 0;
}
offset++;
return offset;
}
__device__ int parseVariableExpression(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
char *token = expression+offset;
while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX)
offset++;
if (offset == OFFSET_SAFETY_MAX)
return 0;
*(expression+offset) = '\0';
offset++;
// TODO: Look up variable in symbol table.
// Of course, to do that we need to *have* a symbol table, so that's first on the list.
return offset;
}
__device__ int parseBooleanConstant(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
if (*(expression+offset) == 'F') {
value->booleanValue = 0;
value->type = DT_BOOLEAN;
} else if (*(expression+offset) == 'T') {
value->booleanValue = 1;
value->type = DT_BOOLEAN;
} else { // error
value->type = DT_UNDEFINED;
return 0;
}
offset++;
// Skip over closing }
if (*(expression+offset) != '}')
return 0;
offset++;
return offset;
}
__device__ int parseIntegerConstant(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
value->intValue = 0;
while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX) {
value->intValue = value->intValue * 10 + (*(expression+offset) - '0');
offset++;
}
if (offset == OFFSET_SAFETY_MAX)
return 0;
value->type = DT_INT;
offset++;
return offset;
}
__device__ int parseFloatConstant(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (expression[0] != '{')
return 0;
offset++;
if (*(expression+offset) != '0')
return 0;
offset++;
if (*(expression+offset) != '.')
return 0;
offset++;
value->floatValue = 0;
int divisor = 10;
while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX) {
value->floatValue = value->floatValue + ((float)(*(expression+offset) - '0'))/divisor;
divisor = divisor * 10;
offset++;
}
if (offset == OFFSET_SAFETY_MAX)
return 0;
value->type = DT_FLOAT;
offset++;
return offset;
}
__device__ int parseStringConstant(char *expression, Value *value) {
int offset = 0;
// Skip over opening {
if (*expression != '{')
return 0;
offset++;
char *token = expression+offset;
while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX)
offset++;
if (offset == OFFSET_SAFETY_MAX)
return 0;
*(expression+offset) = '\0';
offset++;
value->type = DT_STRING;
value->stringValue = token;
return offset;
}
__device__ int parseOperator(char *expression, Operator *op) {
char c1 = expression[0];
char c2 = expression[1];
if (c1 == '=' && c2 == '=')
*op = OP_EQUAL_TO;
else if (c1 == '>' && c2 == '>')
*op = OP_GREATER_THAN;
else if (c1 == '>' && c2 == '=')
*op = OP_GREATER_THAN_OR_EQUAL_TO;
else if (c1 == '<' && c2 == '<')
*op = OP_LESS_THAN;
else if (c1 == '<' && c2 == '=')
*op = OP_LESS_THAN_OR_EQUAL_TO;
else if (c1 == '&' && c2 == '&')
*op = OP_LOGICAL_AND;
else if (c1 == '|' && c2 == '|')
*op = OP_LOGICAL_OR;
else if (c1 == '!' && c2 == '=')
*op = OP_NOT_EQUAL_TO;
else // error
return 0;
return 2;
}
/////////////////////////////////////////////////////////////////////
// EVALUATION FUNCTIONS
__device__ void evaluateBinaryExpression(Value *operand1, Operator op, Value *operand2, Value *value) {
// Indicate an error by not setting the type on the return value
value->type = DT_UNDEFINED;
// For now only allowing comparison of the same types
if (operand1->type != operand2->type)
return;
switch (operand1->type) {
case DT_INT:
evaluateIntegerComparison(operand1->intValue, op, operand2->intValue, value);
break;
case DT_FLOAT:
evaluateFloatComparison(operand1->floatValue, op, operand2->floatValue, value);
break;
case DT_STRING:
evaluateStringComparison(operand1->stringValue, op, operand2->stringValue, value);
break;
case DT_BOOLEAN:
evaluateBooleanComparison(operand1->booleanValue, op, operand2->booleanValue, value);
break;
default:
case DT_UNDEFINED:
// do nothing
break;
}
}
__device__ void evaluateIntegerComparison(int op1, Operator op, int op2, Value *value) {
value->type = DT_BOOLEAN;
int bv = 0; // assume comparison is false
switch (op) {
case OP_EQUAL_TO:
if (op1 == op2) bv = 1;
break;
case OP_GREATER_THAN:
if (op1 > op2) bv = 1;
break;
case OP_GREATER_THAN_OR_EQUAL_TO:
if (op1 >= op2) bv = 1;
break;
case OP_LESS_THAN:
if (op1 < op2) bv = 1;
break;
case OP_LESS_THAN_OR_EQUAL_TO:
if (op1 <= op2) bv = 1;
break;
case OP_LOGICAL_AND:
bv = op1 && op2;
break;
case OP_LOGICAL_OR:
bv = op1 || op2;
break;
case OP_NOT_EQUAL_TO:
if (op1 != op2) bv = 1;
break;
default:
case OP_UNDEFINED:
break;
}
value->booleanValue = bv;
}
__device__ void evaluateFloatComparison(float op1, Operator op, float op2, Value *value) {
    // The result of any float comparison is a boolean Value.
    value->type = DT_BOOLEAN;
    int bv = 0; // assume comparison is false
    switch (op) {
    case OP_EQUAL_TO:
        // NOTE: exact IEEE equality; no epsilon tolerance is applied.
        if (op1 == op2) bv = 1;
        break;
    case OP_GREATER_THAN:
        if (op1 > op2) bv = 1;
        break;
    case OP_GREATER_THAN_OR_EQUAL_TO:
        if (op1 >= op2) bv = 1;
        break;
    case OP_LESS_THAN:
        if (op1 < op2) bv = 1;
        break;
    case OP_LESS_THAN_OR_EQUAL_TO:
        if (op1 <= op2) bv = 1;
        break;
    case OP_LOGICAL_AND:
        bv = op1 && op2;
        break;
    case OP_LOGICAL_OR:
        bv = op1 || op2;
        break;
    case OP_NOT_EQUAL_TO:
        if (op1 != op2) bv = 1;
        break;
    default:
    case OP_UNDEFINED:
        // BUGFIX: this case previously `return`ed, leaving value->booleanValue
        // unwritten even though value->type already said DT_BOOLEAN. Fall
        // through and store 0 instead, matching evaluateIntegerComparison
        // and evaluateBooleanComparison.
        break;
    }
    value->booleanValue = bv;
}
__device__ void evaluateStringComparison(char *op1, Operator op, char *op2, Value *value) {
    // Only (in)equality is supported: ordering operators would need a
    // strcmp-style lexical comparison, and the standard C library is not
    // available in device code, so that is deliberately out of scope.
    value->type = DT_BOOLEAN;
    int result = 0;
    if (op == OP_EQUAL_TO)
        result = (dstreql(op1, op2) == 1) ? 1 : 0;
    else if (op == OP_NOT_EQUAL_TO)
        result = (dstreql(op1, op2) == 0) ? 1 : 0;
    // Every other operator (ordering, logical, undefined) yields false.
    value->booleanValue = result;
}
__device__ void evaluateBooleanComparison(int op1, Operator op, int op2, Value *value) {
    // Booleans support equality and the logical connectives; ordering
    // operators are meaningless here and evaluate to false.
    value->type = DT_BOOLEAN;
    int result;
    switch (op) {
    case OP_EQUAL_TO:     result = (op1 == op2); break;
    case OP_LOGICAL_AND:  result = (op1 && op2); break;
    case OP_LOGICAL_OR:   result = (op1 || op2); break;
    case OP_NOT_EQUAL_TO: result = (op1 != op2); break;
    default:              result = 0; break;  // unsupported operator -> false
    }
    value->booleanValue = result;
}
/////////////////////////////////////////////////////////////////////
// STRING FUNCTIONS
__device__ int dstrlen(char *str) {
    // Device-side strlen: count characters up to (not including) the NUL.
    char *p = str;
    while (*p)
        p++;
    return (int)(p - str);
}
__device__ int dstreql(char *str1, char *str2) {
    // Device-side string equality: returns 1 iff both NUL-terminated
    // strings hold identical characters, else 0.
    // NOTE: the original loop also tested *str2 != '\0', but that is
    // implied by (*str1 == *str2 && *str1 != '\0'); the redundant check
    // is dropped.
    while (*str1 == *str2 && *str1 != '\0') {
        str1++;
        str2++;
    }
    // Equal iff both strings ended at the same position.
    return (*str1 == '\0' && *str2 == '\0') ? 1 : 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
// Runtime type tag for a parsed Value.
typedef enum {DT_UNDEFINED = 0, DT_INT, DT_FLOAT, DT_STRING, DT_BOOLEAN} DataType;
// Binary operators recognized by parseOperator().
typedef enum {OP_UNDEFINED = 0, OP_EQUAL_TO, OP_GREATER_THAN, OP_GREATER_THAN_OR_EQUAL_TO, OP_LESS_THAN,
OP_LESS_THAN_OR_EQUAL_TO, OP_LOGICAL_AND, OP_LOGICAL_OR, OP_NOT_EQUAL_TO} Operator;
// Outcome of evaluating one decision-list node (written to the kernel's output array).
typedef enum {DL_ERROR = 0, DL_FALSE = 1, DL_TRUE = 2, DL_IGNORE = 3} DLNodeValue;
// Tagged union of an expression-evaluation result; `type` selects which
// payload field below is meaningful.
typedef struct {
DataType type;
int intValue;
float floatValue;
char *stringValue;
int booleanValue;
} Value;
// Upper bound on how far the parsers scan for a closing '}' before giving
// up (guards against malformed, unterminated input).
#define OFFSET_SAFETY_MAX 100
// Forward declarations for the recursive-descent parser and the evaluators.
// Convention: every parseXXX function returns the number of characters
// consumed from the input (0 on error) and reports its actual result
// through the out-parameter.
__device__ int parseDecisionListNode(char *expression, DLNodeValue *value);
__device__ int parseExpression(char *expression, Value *value);
__device__ int parseBinaryExpression(char *expression, Value *value);
__device__ int parseVariableExpression(char *expression, Value *value);
__device__ int parseBooleanConstant(char *expression, Value *value);
__device__ int parseIntegerConstant(char *expression, Value *value);
__device__ int parseFloatConstant(char *expression, Value *value);
__device__ int parseStringConstant(char *expression, Value *value);
__device__ int parseOperator(char *expression, Operator *op);
__device__ void evaluateBinaryExpression(Value *operand1, Operator op, Value *operand2, Value *returnValue);
__device__ void evaluateIntegerComparison(int op1, Operator op, int op2, Value *value);
__device__ void evaluateFloatComparison(float op1, Operator op, float op2, Value *value);
__device__ void evaluateStringComparison(char *op1, Operator op, char *op2, Value *value);
__device__ void evaluateBooleanComparison(int op1, Operator op, int op2, Value *value);
__device__ int dstrlen(char *str);
__device__ int dstreql(char *str1, char *str2);
extern "C"
// Kernel entry point: one thread per serialized decision-list expression.
// Parses expressions[idx] and stores the node verdict (a DLNodeValue) in
// output[idx]. Threads beyond numExpressions do nothing.
__global__ void processDecisionLists(int numExpressions, char **expressions, int *output)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Explicit cast avoids the implicit signed/unsigned comparison the
    // original relied on (same semantics, no compiler warning).
    if (idx < (unsigned int)numExpressions) {
        char *expression = expressions[idx];
        DLNodeValue dlNodeValue;
        // The parse offset is irrelevant at the top level; only the verdict
        // is reported back to the host. (The original stored it in an
        // unused local.)
        (void)parseDecisionListNode(expression, &dlNodeValue);
        output[idx] = dlNodeValue;
    }
}
/////////////////////////////////////////////////////////////////////
// PARSING FUNCTIONS
//
// NB: All the parseXXX functions return a value that indicates how far the pointer
// should be advanced. The actual return value is in the parameter list.
__device__ int parseDecisionListNode(char *expression, DLNodeValue *dlNodeValue) {
    // A DL node is <condition expression> followed by one 'T'/'F' verdict
    // character. The condition on the default node always evaluates true —
    // redundant, but kept so the Java-side encoder can stay uniform.
    Value condition;
    int offset = parseExpression(expression, &condition);

    // The condition must evaluate to a boolean; anything else is an error.
    if (condition.type != DT_BOOLEAN) {
        *dlNodeValue = DL_ERROR;
        return 0;
    }

    if (condition.booleanValue == 0) {
        // Condition false: caller should skip this node and try the next.
        *dlNodeValue = DL_IGNORE;
        return offset;
    }

    // Condition true: report the node's own T/F verdict.
    switch (expression[offset]) {
    case 'T': *dlNodeValue = DL_TRUE;  break;
    case 'F': *dlNodeValue = DL_FALSE; break;
    default:  *dlNodeValue = DL_ERROR; return 0;
    }
    return offset;
}
__device__ int parseExpression(char *expression, Value *value) {
    // The first two characters encode the expression kind ("EB", "EV",
    // "CB", "CI", "CF", "CS"); the payload follows immediately after.
    char kind = expression[0];
    char sub = expression[1];
    int offset = 2;

    // NB: this is where additional expression kinds would be plugged in to
    // make the kernel more generic.
    if (kind == 'E') {
        if (sub == 'B') return offset + parseBinaryExpression(expression + offset, value);
        if (sub == 'V') return offset + parseVariableExpression(expression + offset, value);
    } else if (kind == 'C') {
        switch (sub) {
        case 'B': return offset + parseBooleanConstant(expression + offset, value);
        case 'I': return offset + parseIntegerConstant(expression + offset, value);
        case 'F': return offset + parseFloatConstant(expression + offset, value);
        case 'S': return offset + parseStringConstant(expression + offset, value);
        }
    }
    // Unrecognized kind: flag an error and consume nothing.
    value->type = DT_UNDEFINED;
    return 0;
}
__device__ int parseBinaryExpression(char *expression, Value *value) {
    // Layout: '{' <operand1> <operator> <operand2> '}'
    if (expression[0] != '{')
        return 0;
    int offset = 1;

    Value lhs, rhs;
    Operator op;
    offset += parseExpression(expression + offset, &lhs);
    offset += parseOperator(expression + offset, &op);
    offset += parseExpression(expression + offset, &rhs);

    // Evaluate eagerly; a brace mismatch below still invalidates the result.
    evaluateBinaryExpression(&lhs, op, &rhs, value);

    if (expression[offset] != '}') {
        value->type = DT_UNDEFINED;
        return 0;
    }
    return offset + 1;
}
__device__ int parseVariableExpression(char *expression, Value *value) {
    // Layout: '{' <name> '}'. NOTE: the terminating '}' is overwritten with
    // NUL in place, so the input buffer is modified.
    if (expression[0] != '{')
        return 0;
    int offset = 1;
    char *token = expression + offset; // start of the variable name
    while (expression[offset] != '}' && offset < OFFSET_SAFETY_MAX)
        offset++;
    if (offset == OFFSET_SAFETY_MAX)
        return 0;
    expression[offset] = '\0';
    offset++;
    // TODO: resolve `token` in a symbol table once one exists; until then
    // `value` is intentionally left untouched.
    return offset;
}
__device__ int parseBooleanConstant(char *expression, Value *value) {
    // Layout: '{' ('T'|'F') '}'
    if (expression[0] != '{')
        return 0;
    int offset = 1;

    char flag = expression[offset];
    if (flag == 'F') {
        value->booleanValue = 0;
        value->type = DT_BOOLEAN;
    } else if (flag == 'T') {
        value->booleanValue = 1;
        value->type = DT_BOOLEAN;
    } else {
        // Neither 'T' nor 'F': malformed constant.
        value->type = DT_UNDEFINED;
        return 0;
    }
    offset++;

    if (expression[offset] != '}')
        return 0;
    return offset + 1;
}
__device__ int parseIntegerConstant(char *expression, Value *value) {
    // Layout: '{' <digits> '}'. Digits accumulate base-10; no sign or digit
    // validation is performed (non-digit characters silently skew the result).
    if (expression[0] != '{')
        return 0;
    int offset = 1;
    value->intValue = 0;
    for (; expression[offset] != '}' && offset < OFFSET_SAFETY_MAX; offset++)
        value->intValue = value->intValue * 10 + (expression[offset] - '0');
    if (offset == OFFSET_SAFETY_MAX)
        return 0;
    value->type = DT_INT;
    return offset + 1;
}
__device__ int parseFloatConstant(char *expression, Value *value) {
    // Layout: '{' '0' '.' <digits> '}' — only fractions in [0, 1) are encoded.
    if (expression[0] != '{')
        return 0;
    int offset = 1;
    // Mandatory "0." prefix.
    if (*(expression+offset) != '0')
        return 0;
    offset++;
    if (*(expression+offset) != '.')
        return 0;
    offset++;
    value->floatValue = 0;
    // BUGFIX: `divisor` was an int multiplied by 10 per digit, overflowing
    // (undefined behavior) after 9 fractional digits — and the format allows
    // up to OFFSET_SAFETY_MAX characters. A float divisor yields identical
    // quotients for the leading digits (the int was converted to float for
    // the division anyway) and degrades gracefully instead of overflowing.
    float divisor = 10;
    while (*(expression+offset) != '}' && offset < OFFSET_SAFETY_MAX) {
        value->floatValue = value->floatValue + ((float)(*(expression+offset) - '0'))/divisor;
        divisor = divisor * 10;
        offset++;
    }
    if (offset == OFFSET_SAFETY_MAX)
        return 0;
    value->type = DT_FLOAT;
    offset++;
    return offset;
}
__device__ int parseStringConstant(char *expression, Value *value) {
    // Layout: '{' <chars> '}'. The closing brace is replaced by NUL in place
    // so the Value can point straight into the input buffer (no copy).
    if (expression[0] != '{')
        return 0;
    int offset = 1;
    char *start = expression + offset;
    while (expression[offset] != '}' && offset < OFFSET_SAFETY_MAX)
        offset++;
    if (offset == OFFSET_SAFETY_MAX)
        return 0;
    expression[offset] = '\0';
    value->type = DT_STRING;
    value->stringValue = start;
    return offset + 1;
}
__device__ int parseOperator(char *expression, Operator *op) {
    // Operators are fixed two-character codes. Note that "<<" (not "<")
    // means less-than in this encoding, mirroring ">>" for greater-than.
    char a = expression[0];
    char b = expression[1];
    Operator parsed = OP_UNDEFINED;

    if (b == '=') {
        if (a == '=') parsed = OP_EQUAL_TO;
        else if (a == '>') parsed = OP_GREATER_THAN_OR_EQUAL_TO;
        else if (a == '<') parsed = OP_LESS_THAN_OR_EQUAL_TO;
        else if (a == '!') parsed = OP_NOT_EQUAL_TO;
    } else if (a == '>' && b == '>') {
        parsed = OP_GREATER_THAN;
    } else if (a == '<' && b == '<') {
        parsed = OP_LESS_THAN;
    } else if (a == '&' && b == '&') {
        parsed = OP_LOGICAL_AND;
    } else if (a == '|' && b == '|') {
        parsed = OP_LOGICAL_OR;
    }

    if (parsed == OP_UNDEFINED)
        return 0; // unrecognized pair: consume nothing, *op left untouched
    *op = parsed;
    return 2;
}
/////////////////////////////////////////////////////////////////////
// EVALUATION FUNCTIONS
__device__ void evaluateBinaryExpression(Value *operand1, Operator op, Value *operand2, Value *value) {
    // Default to DT_UNDEFINED so any early exit below reports an error
    // to the caller through the result's type field.
    value->type = DT_UNDEFINED;

    // Mixed-type comparisons are not supported: bail out immediately.
    if (operand1->type != operand2->type)
        return;

    // Dispatch on the (shared) operand type to the matching comparator.
    if (operand1->type == DT_INT)
        evaluateIntegerComparison(operand1->intValue, op, operand2->intValue, value);
    else if (operand1->type == DT_FLOAT)
        evaluateFloatComparison(operand1->floatValue, op, operand2->floatValue, value);
    else if (operand1->type == DT_STRING)
        evaluateStringComparison(operand1->stringValue, op, operand2->stringValue, value);
    else if (operand1->type == DT_BOOLEAN)
        evaluateBooleanComparison(operand1->booleanValue, op, operand2->booleanValue, value);
    // DT_UNDEFINED (or anything else): leave the error marker in place.
}
__device__ void evaluateIntegerComparison(int op1, Operator op, int op2, Value *value) {
    // The result of any integer comparison is a boolean Value.
    value->type = DT_BOOLEAN;
    int result;
    switch (op) {
    case OP_EQUAL_TO:                 result = (op1 == op2); break;
    case OP_GREATER_THAN:             result = (op1 >  op2); break;
    case OP_GREATER_THAN_OR_EQUAL_TO: result = (op1 >= op2); break;
    case OP_LESS_THAN:                result = (op1 <  op2); break;
    case OP_LESS_THAN_OR_EQUAL_TO:    result = (op1 <= op2); break;
    case OP_LOGICAL_AND:              result = (op1 && op2); break;
    case OP_LOGICAL_OR:               result = (op1 || op2); break;
    case OP_NOT_EQUAL_TO:             result = (op1 != op2); break;
    default:
    case OP_UNDEFINED:                result = 0; break;  // unknown operator -> false
    }
    value->booleanValue = result;
}
__device__ void evaluateFloatComparison(float op1, Operator op, float op2, Value *value) {
    // The result of any float comparison is a boolean Value.
    value->type = DT_BOOLEAN;
    int bv = 0; // assume comparison is false
    switch (op) {
    case OP_EQUAL_TO:
        // NOTE: exact IEEE equality; no epsilon tolerance is applied.
        if (op1 == op2) bv = 1;
        break;
    case OP_GREATER_THAN:
        if (op1 > op2) bv = 1;
        break;
    case OP_GREATER_THAN_OR_EQUAL_TO:
        if (op1 >= op2) bv = 1;
        break;
    case OP_LESS_THAN:
        if (op1 < op2) bv = 1;
        break;
    case OP_LESS_THAN_OR_EQUAL_TO:
        if (op1 <= op2) bv = 1;
        break;
    case OP_LOGICAL_AND:
        bv = op1 && op2;
        break;
    case OP_LOGICAL_OR:
        bv = op1 || op2;
        break;
    case OP_NOT_EQUAL_TO:
        if (op1 != op2) bv = 1;
        break;
    default:
    case OP_UNDEFINED:
        // BUGFIX: this case previously `return`ed, leaving value->booleanValue
        // unwritten even though value->type already said DT_BOOLEAN. Fall
        // through and store 0 instead, matching evaluateIntegerComparison
        // and evaluateBooleanComparison.
        break;
    }
    value->booleanValue = bv;
}
__device__ void evaluateStringComparison(char *op1, Operator op, char *op2, Value *value) {
    // Only (in)equality is supported: ordering operators would need a
    // strcmp-style lexical comparison, and the standard C library is not
    // available in device code, so that is deliberately out of scope.
    value->type = DT_BOOLEAN;
    int result = 0;
    if (op == OP_EQUAL_TO)
        result = (dstreql(op1, op2) == 1) ? 1 : 0;
    else if (op == OP_NOT_EQUAL_TO)
        result = (dstreql(op1, op2) == 0) ? 1 : 0;
    // Every other operator (ordering, logical, undefined) yields false.
    value->booleanValue = result;
}
__device__ void evaluateBooleanComparison(int op1, Operator op, int op2, Value *value) {
    // Booleans support equality and the logical connectives; ordering
    // operators are meaningless here and evaluate to false.
    value->type = DT_BOOLEAN;
    int result;
    switch (op) {
    case OP_EQUAL_TO:     result = (op1 == op2); break;
    case OP_LOGICAL_AND:  result = (op1 && op2); break;
    case OP_LOGICAL_OR:   result = (op1 || op2); break;
    case OP_NOT_EQUAL_TO: result = (op1 != op2); break;
    default:              result = 0; break;  // unsupported operator -> false
    }
    value->booleanValue = result;
}
/////////////////////////////////////////////////////////////////////
// STRING FUNCTIONS
__device__ int dstrlen(char *str) {
    // Device-side strlen: count characters up to (not including) the NUL.
    char *p = str;
    while (*p)
        p++;
    return (int)(p - str);
}
__device__ int dstreql(char *str1, char *str2) {
    // Device-side string equality: returns 1 iff both NUL-terminated
    // strings hold identical characters, else 0.
    // NOTE: the original loop also tested *str2 != '\0', but that is
    // implied by (*str1 == *str2 && *str1 != '\0'); the redundant check
    // is dropped.
    while (*str1 == *str2 && *str1 != '\0') {
        str1++;
        str2++;
    }
    // Equal iff both strings ended at the same position.
    return (*str1 == '\0' && *str2 == '\0') ? 1 : 0;
}
.file "decisionListKernel.hip"
# ---------------------------------------------------------------------------
# Host-side launch stub for the processDecisionLists HIP kernel
# (compiler-generated, SysV AMD64, AT&T syntax).
# Spills the three C arguments (int numExpressions in %edi, char **expressions
# in %rsi, int *output in %rdx) to the stack, builds the kernel-argument
# pointer array at 80(%rsp), pops the launch configuration pushed earlier by
# the runtime, and forwards everything to hipLaunchKernel.
# ---------------------------------------------------------------------------
.globl __device_stub__processDecisionLists # -- Begin function __device_stub__processDecisionLists
.p2align 4, 0x90
.type __device_stub__processDecisionLists,@function
__device_stub__processDecisionLists: # @__device_stub__processDecisionLists
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
# Spill the three kernel arguments to fixed stack slots.
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
# Kernel-argument array: three pointers to the spilled slots.
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
# Retrieve grid dim, block dim, shared-mem size and stream into the
# out-parameter slots at 48/32/24/16(%rsp).
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $processDecisionLists, %edi
# Shared-mem size and stream are passed on the stack (args 7 and 8).
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size __device_stub__processDecisionLists, .Lfunc_end0-__device_stub__processDecisionLists
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# Module constructor (referenced from .init_array): registers the embedded
# fat binary with the HIP runtime exactly once (caching the handle in
# __hip_gpubin_handle), registers the processDecisionLists kernel symbol,
# and installs __hip_module_dtor via atexit.
# ---------------------------------------------------------------------------
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
# Only register the fat binary on the first invocation.
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
# Zero the four trailing (unused) pointer arguments passed on the stack.
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $processDecisionLists, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
# ---------------------------------------------------------------------------
# Module destructor (installed via atexit by __hip_module_ctor): unregisters
# the fat binary if a handle was cached and clears __hip_gpubin_handle.
# ---------------------------------------------------------------------------
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2                              # nothing registered: fast return
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type processDecisionLists,@object # @processDecisionLists
.section .rodata,"a",@progbits
.globl processDecisionLists
.p2align 3, 0x0
processDecisionLists:
.quad __device_stub__processDecisionLists
.size processDecisionLists, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "processDecisionLists"
.size .L__unnamed_1, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__processDecisionLists
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym processDecisionLists
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0019d4a9_00000000-6_decisionListKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2045:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2045:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z21parseDecisionListNodePcP11DLNodeValue
.type _Z21parseDecisionListNodePcP11DLNodeValue, @function
_Z21parseDecisionListNodePcP11DLNodeValue:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z21parseDecisionListNodePcP11DLNodeValue, .-_Z21parseDecisionListNodePcP11DLNodeValue
.globl _Z15parseExpressionPcP5Value
.type _Z15parseExpressionPcP5Value, @function
_Z15parseExpressionPcP5Value:
.LFB2028:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2028:
.size _Z15parseExpressionPcP5Value, .-_Z15parseExpressionPcP5Value
.globl _Z21parseBinaryExpressionPcP5Value
.type _Z21parseBinaryExpressionPcP5Value, @function
_Z21parseBinaryExpressionPcP5Value:
.LFB2029:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2029:
.size _Z21parseBinaryExpressionPcP5Value, .-_Z21parseBinaryExpressionPcP5Value
.globl _Z23parseVariableExpressionPcP5Value
.type _Z23parseVariableExpressionPcP5Value, @function
_Z23parseVariableExpressionPcP5Value:
.LFB2030:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2030:
.size _Z23parseVariableExpressionPcP5Value, .-_Z23parseVariableExpressionPcP5Value
.globl _Z20parseBooleanConstantPcP5Value
.type _Z20parseBooleanConstantPcP5Value, @function
_Z20parseBooleanConstantPcP5Value:
.LFB2031:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2031:
.size _Z20parseBooleanConstantPcP5Value, .-_Z20parseBooleanConstantPcP5Value
.globl _Z20parseIntegerConstantPcP5Value
.type _Z20parseIntegerConstantPcP5Value, @function
_Z20parseIntegerConstantPcP5Value:
.LFB2032:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2032:
.size _Z20parseIntegerConstantPcP5Value, .-_Z20parseIntegerConstantPcP5Value
.globl _Z18parseFloatConstantPcP5Value
.type _Z18parseFloatConstantPcP5Value, @function
_Z18parseFloatConstantPcP5Value:
.LFB2033:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2033:
.size _Z18parseFloatConstantPcP5Value, .-_Z18parseFloatConstantPcP5Value
.globl _Z19parseStringConstantPcP5Value
.type _Z19parseStringConstantPcP5Value, @function
_Z19parseStringConstantPcP5Value:
.LFB2034:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2034:
.size _Z19parseStringConstantPcP5Value, .-_Z19parseStringConstantPcP5Value
.globl _Z13parseOperatorPcP8Operator
.type _Z13parseOperatorPcP8Operator, @function
_Z13parseOperatorPcP8Operator:
.LFB2035:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2035:
.size _Z13parseOperatorPcP8Operator, .-_Z13parseOperatorPcP8Operator
.globl _Z24evaluateBinaryExpressionP5Value8OperatorS0_S0_
.type _Z24evaluateBinaryExpressionP5Value8OperatorS0_S0_, @function
_Z24evaluateBinaryExpressionP5Value8OperatorS0_S0_:
.LFB2036:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2036:
.size _Z24evaluateBinaryExpressionP5Value8OperatorS0_S0_, .-_Z24evaluateBinaryExpressionP5Value8OperatorS0_S0_
.globl _Z25evaluateIntegerComparisoni8OperatoriP5Value
.type _Z25evaluateIntegerComparisoni8OperatoriP5Value, @function
_Z25evaluateIntegerComparisoni8OperatoriP5Value:
.LFB2037:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2037:
.size _Z25evaluateIntegerComparisoni8OperatoriP5Value, .-_Z25evaluateIntegerComparisoni8OperatoriP5Value
.globl _Z23evaluateFloatComparisonf8OperatorfP5Value
.type _Z23evaluateFloatComparisonf8OperatorfP5Value, @function
_Z23evaluateFloatComparisonf8OperatorfP5Value:
.LFB2038:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2038:
.size _Z23evaluateFloatComparisonf8OperatorfP5Value, .-_Z23evaluateFloatComparisonf8OperatorfP5Value
.globl _Z24evaluateStringComparisonPc8OperatorS_P5Value
.type _Z24evaluateStringComparisonPc8OperatorS_P5Value, @function
_Z24evaluateStringComparisonPc8OperatorS_P5Value:
.LFB2039:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2039:
.size _Z24evaluateStringComparisonPc8OperatorS_P5Value, .-_Z24evaluateStringComparisonPc8OperatorS_P5Value
.globl _Z25evaluateBooleanComparisoni8OperatoriP5Value
.type _Z25evaluateBooleanComparisoni8OperatoriP5Value, @function
_Z25evaluateBooleanComparisoni8OperatoriP5Value:
.LFB2040:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2040:
.size _Z25evaluateBooleanComparisoni8OperatoriP5Value, .-_Z25evaluateBooleanComparisoni8OperatoriP5Value
.globl _Z7dstrlenPc
.type _Z7dstrlenPc, @function
_Z7dstrlenPc:
.LFB2041:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2041:
.size _Z7dstrlenPc, .-_Z7dstrlenPc
.globl _Z7dstreqlPcS_
.type _Z7dstreqlPcS_, @function
_Z7dstreqlPcS_:
.LFB2042:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2042:
.size _Z7dstreqlPcS_, .-_Z7dstreqlPcS_
.globl _Z44__device_stub__Z20processDecisionListsiPPcPiiPPcPi
.type _Z44__device_stub__Z20processDecisionListsiPPcPiiPPcPi, @function
_Z44__device_stub__Z20processDecisionListsiPPcPiiPPcPi:
.LFB2067:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L39
.L35:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq processDecisionLists(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L35
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2067:
.size _Z44__device_stub__Z20processDecisionListsiPPcPiiPPcPi, .-_Z44__device_stub__Z20processDecisionListsiPPcPiiPPcPi
.globl processDecisionLists
.type processDecisionLists, @function
processDecisionLists:
.LFB2068:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z44__device_stub__Z20processDecisionListsiPPcPiiPPcPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2068:
.size processDecisionLists, .-processDecisionLists
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "processDecisionLists"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2070:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq processDecisionLists(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2070:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "decisionListKernel.hip"
.globl __device_stub__processDecisionLists # -- Begin function __device_stub__processDecisionLists
.p2align 4, 0x90
.type __device_stub__processDecisionLists,@function
__device_stub__processDecisionLists: # @__device_stub__processDecisionLists
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movl %edi, 12(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $processDecisionLists, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size __device_stub__processDecisionLists, .Lfunc_end0-__device_stub__processDecisionLists
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $processDecisionLists, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type processDecisionLists,@object # @processDecisionLists
.section .rodata,"a",@progbits
.globl processDecisionLists
.p2align 3, 0x0
processDecisionLists:
.quad __device_stub__processDecisionLists
.size processDecisionLists, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "processDecisionLists"
.size .L__unnamed_1, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__processDecisionLists
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym processDecisionLists
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
/*
Vector addition with a single thread for each addition
*/
/*
Vector addition with thread mapping, where each thread also accesses its neighbor's element in parallel
*/
// slower than the simpler one-thread-per-element version
/*
Matrix Matrix multiplication with a single thread for each row
*/
/*
Matrix Matrix multiplication with a single thread for each result element
*/
/*
Matrix Vector multiplication with a block with 4 threads per block, shared block mem and parallel reduce
*/
// Naive matrix-matrix multiply: c = a * b, all stored row-major in flat int arrays.
// Index expressions imply a is n_row x n_comm, b is n_comm x n_col, c is n_row x n_col.
// One thread computes one (or more) entire rows of c.
__global__ void matrix_matrix_mul_old(int *a, int *b, int *c, int n_row, int n_col, int n_comm)
{
// Global 1-D thread id; serves as the row index of c handled by this thread.
int tid= threadIdx.x + blockIdx.x * blockDim.x;
int temp=0;
while(tid<n_row)
{
// For each column k of c, accumulate the dot product of row tid of a
// with column k of b into temp, then store it.
for (int k=0;k<n_col;k++)
{
temp=0;
for(int j=0;j<n_comm;j++)
{
temp+= a[n_comm*tid+j]* b[j*n_col+k];
}
c[tid*n_col+k]=temp;
}
// Advance by the total thread count in the grid (grid-stride loop over rows),
// so the kernel handles n_row larger than the launched thread count.
tid+=blockDim.x * gridDim.x;
}
}
Function : _Z21matrix_matrix_mul_oldPiS_S_iii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R7, R7, c[0x0][0x0], R0 ; /* 0x0000000007077a24 */
/* 0x001fca00078e0200 */
/*0040*/ ISETP.GE.AND P0, PT, R7, c[0x0][0x178], PT ; /* 0x00005e0007007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ ISETP.LT.AND P0, PT, RZ, c[0x0][0x17c], PT ; /* 0x00005f00ff007a0c */
/* 0x000fda0003f01270 */
/*0070*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0080*/ HFMA2.MMA R0, -RZ, RZ, 0, 5.9604644775390625e-08 ; /* 0x00000001ff007435 */
/* 0x000fe200000001ff */
/*0090*/ MOV R9, c[0x0][0x180] ; /* 0x0000600000097a02 */
/* 0x000fe20000000f00 */
/*00a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00b0*/ MOV R8, c[0x0][0x17c] ; /* 0x00005f0000087a02 */
/* 0x000fe40000000f00 */
/*00c0*/ LOP3.LUT R9, R9, 0x3, RZ, 0xc0, !PT ; /* 0x0000000309097812 */
/* 0x000fe400078ec0ff */
/*00d0*/ LOP3.LUT R8, R8, 0x3, RZ, 0xc0, !PT ; /* 0x0000000308087812 */
/* 0x000fe400078ec0ff */
/*00e0*/ IADD3 R6, -R9, c[0x0][0x180], RZ ; /* 0x0000600009067a10 */
/* 0x000fe40007ffe1ff */
/*00f0*/ IADD3 R2, -R0, c[0x0][0x17c], RZ ; /* 0x00005f0000027a10 */
/* 0x000fc40007ffe1ff */
/*0100*/ IADD3 R0, -R0, c[0x0][0x180], RZ ; /* 0x0000600000007a10 */
/* 0x000fe40007ffe1ff */
/*0110*/ ISETP.GE.U32.AND P1, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f26070 */
/*0120*/ ISETP.GE.U32.AND P2, PT, R0, 0x3, PT ; /* 0x000000030000780c */
/* 0x000fe40003f46070 */
/*0130*/ IADD3 R4, -R8, c[0x0][0x17c], RZ ; /* 0x00005f0008047a10 */
/* 0x000fe40007ffe1ff */
/*0140*/ ISETP.LT.AND P0, PT, RZ, c[0x0][0x180], PT ; /* 0x00006000ff007a0c */
/* 0x000fe20003f01270 */
/*0150*/ IMAD R2, R7, c[0x0][0x17c], RZ ; /* 0x00005f0007027a24 */
/* 0x001fe200078e02ff */
/*0160*/ MOV R0, c[0x0][0x0] ; /* 0x0000000000007a02 */
/* 0x000fe40000000f00 */
/*0170*/ MOV R5, R7 ; /* 0x0000000700057202 */
/* 0x000fc60000000f00 */
/*0180*/ IMAD R7, R0, c[0x0][0xc], R7 ; /* 0x0000030000077a24 */
/* 0x000fca00078e0207 */
/*0190*/ ISETP.GE.AND P3, PT, R7, c[0x0][0x178], PT ; /* 0x00005e0007007a0c */
/* 0x000fe20003f66270 */
/*01a0*/ @P0 BRA 0x6b0 ; /* 0x0000050000000947 */
/* 0x000fea0003800000 */
/*01b0*/ HFMA2.MMA R3, -RZ, RZ, 0, 0 ; /* 0x00000000ff037435 */
/* 0x000fe200000001ff */
/*01c0*/ @!P1 BRA 0x5e0 ; /* 0x0000041000009947 */
/* 0x000fec0003800000 */
/*01d0*/ ISETP.GT.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe40003f04270 */
/*01e0*/ MOV R11, 0x4 ; /* 0x00000004000b7802 */
/* 0x000fe40000000f00 */
/*01f0*/ MOV R3, RZ ; /* 0x000000ff00037202 */
/* 0x000fe40000000f00 */
/*0200*/ MOV R0, R4 ; /* 0x0000000400007202 */
/* 0x000fe20000000f00 */
/*0210*/ IMAD.WIDE R10, R2, R11, c[0x0][0x170] ; /* 0x00005c00020a7625 */
/* 0x000fcc00078e020b */
/*0220*/ @!P0 BRA 0x520 ; /* 0x000002f000008947 */
/* 0x000fea0003800000 */
/*0230*/ ISETP.GT.AND P4, PT, R0, 0xc, PT ; /* 0x0000000c0000780c */
/* 0x000fe40003f84270 */
/*0240*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*0250*/ @!P4 BRA 0x3f0 ; /* 0x000001900000c947 */
/* 0x000fea0003800000 */
/*0260*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0270*/ IADD3 R0, R0, -0x10, RZ ; /* 0xfffffff000007810 */
/* 0x000fe20007ffe0ff */
/*0280*/ STG.E [R10.64], RZ ; /* 0x000000ff0a007986 */
/* 0x000fe2000c101904 */
/*0290*/ IADD3 R5, P5, R10, 0x40, RZ ; /* 0x000000400a057810 */
/* 0x000fe40007fbe0ff */
/*02a0*/ ISETP.GT.AND P4, PT, R0, 0xc, PT ; /* 0x0000000c0000780c */
/* 0x000fe20003f84270 */
/*02b0*/ STG.E [R10.64+0x4], RZ ; /* 0x000004ff0a007986 */
/* 0x000fe2000c101904 */
/*02c0*/ IADD3.X R12, RZ, R11, RZ, P5, !PT ; /* 0x0000000bff0c7210 */
/* 0x000fe40002ffe4ff */
/*02d0*/ IADD3 R3, R3, 0x10, RZ ; /* 0x0000001003037810 */
/* 0x000fe20007ffe0ff */
/*02e0*/ STG.E [R10.64+0x8], RZ ; /* 0x000008ff0a007986 */
/* 0x000fe8000c101904 */
/*02f0*/ STG.E [R10.64+0xc], RZ ; /* 0x00000cff0a007986 */
/* 0x000fe8000c101904 */
/*0300*/ STG.E [R10.64+0x10], RZ ; /* 0x000010ff0a007986 */
/* 0x000fe8000c101904 */
/*0310*/ STG.E [R10.64+0x14], RZ ; /* 0x000014ff0a007986 */
/* 0x000fe8000c101904 */
/*0320*/ STG.E [R10.64+0x18], RZ ; /* 0x000018ff0a007986 */
/* 0x000fe8000c101904 */
/*0330*/ STG.E [R10.64+0x1c], RZ ; /* 0x00001cff0a007986 */
/* 0x000fe8000c101904 */
/*0340*/ STG.E [R10.64+0x20], RZ ; /* 0x000020ff0a007986 */
/* 0x000fe8000c101904 */
/*0350*/ STG.E [R10.64+0x24], RZ ; /* 0x000024ff0a007986 */
/* 0x000fe8000c101904 */
/*0360*/ STG.E [R10.64+0x28], RZ ; /* 0x000028ff0a007986 */
/* 0x000fe8000c101904 */
/*0370*/ STG.E [R10.64+0x2c], RZ ; /* 0x00002cff0a007986 */
/* 0x000fe8000c101904 */
/*0380*/ STG.E [R10.64+0x30], RZ ; /* 0x000030ff0a007986 */
/* 0x000fe8000c101904 */
/*0390*/ STG.E [R10.64+0x34], RZ ; /* 0x000034ff0a007986 */
/* 0x000fe8000c101904 */
/*03a0*/ STG.E [R10.64+0x38], RZ ; /* 0x000038ff0a007986 */
/* 0x000fe8000c101904 */
/*03b0*/ STG.E [R10.64+0x3c], RZ ; /* 0x00003cff0a007986 */
/* 0x0001e4000c101904 */
/*03c0*/ MOV R10, R5 ; /* 0x00000005000a7202 */
/* 0x001fc40000000f00 */
/*03d0*/ MOV R11, R12 ; /* 0x0000000c000b7202 */
/* 0x000fe20000000f00 */
/*03e0*/ @P4 BRA 0x270 ; /* 0xfffffe8000004947 */
/* 0x000fea000383ffff */
/*03f0*/ ISETP.GT.AND P4, PT, R0, 0x4, PT ; /* 0x000000040000780c */
/* 0x000fda0003f84270 */
/*0400*/ @!P4 BRA 0x500 ; /* 0x000000f00000c947 */
/* 0x000fea0003800000 */
/*0410*/ IADD3 R5, P4, R10, 0x20, RZ ; /* 0x000000200a057810 */
/* 0x000fe20007f9e0ff */
/*0420*/ STG.E [R10.64], RZ ; /* 0x000000ff0a007986 */
/* 0x000fe2000c101904 */
/*0430*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0440*/ IADD3.X R12, RZ, R11, RZ, P4, !PT ; /* 0x0000000bff0c7210 */
/* 0x000fe200027fe4ff */
/*0450*/ STG.E [R10.64+0x4], RZ ; /* 0x000004ff0a007986 */
/* 0x000fe2000c101904 */
/*0460*/ IADD3 R3, R3, 0x8, RZ ; /* 0x0000000803037810 */
/* 0x000fe40007ffe0ff */
/*0470*/ IADD3 R0, R0, -0x8, RZ ; /* 0xfffffff800007810 */
/* 0x000fe20007ffe0ff */
/*0480*/ STG.E [R10.64+0x8], RZ ; /* 0x000008ff0a007986 */
/* 0x000fe8000c101904 */
/*0490*/ STG.E [R10.64+0xc], RZ ; /* 0x00000cff0a007986 */
/* 0x000fe8000c101904 */
/*04a0*/ STG.E [R10.64+0x10], RZ ; /* 0x000010ff0a007986 */
/* 0x000fe8000c101904 */
/*04b0*/ STG.E [R10.64+0x14], RZ ; /* 0x000014ff0a007986 */
/* 0x000fe8000c101904 */
/*04c0*/ STG.E [R10.64+0x18], RZ ; /* 0x000018ff0a007986 */
/* 0x000fe8000c101904 */
/*04d0*/ STG.E [R10.64+0x1c], RZ ; /* 0x00001cff0a007986 */
/* 0x0001e4000c101904 */
/*04e0*/ MOV R10, R5 ; /* 0x00000005000a7202 */
/* 0x001fc40000000f00 */
/*04f0*/ MOV R11, R12 ; /* 0x0000000c000b7202 */
/* 0x000fe40000000f00 */
/*0500*/ ISETP.NE.OR P0, PT, R0, RZ, P0 ; /* 0x000000ff0000720c */
/* 0x000fda0000705670 */
/*0510*/ @!P0 BRA 0x5e0 ; /* 0x000000c000008947 */
/* 0x000fea0003800000 */
/*0520*/ IADD3 R0, R0, -0x4, RZ ; /* 0xfffffffc00007810 */
/* 0x000fe20007ffe0ff */
/*0530*/ STG.E [R10.64], RZ ; /* 0x000000ff0a007986 */
/* 0x000fe2000c101904 */
/*0540*/ IADD3 R5, P4, R10, 0x10, RZ ; /* 0x000000100a057810 */
/* 0x000fe40007f9e0ff */
/*0550*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe20003f05270 */
/*0560*/ STG.E [R10.64+0x4], RZ ; /* 0x000004ff0a007986 */
/* 0x000fe2000c101904 */
/*0570*/ IADD3.X R12, RZ, R11, RZ, P4, !PT ; /* 0x0000000bff0c7210 */
/* 0x000fe400027fe4ff */
/*0580*/ IADD3 R3, R3, 0x4, RZ ; /* 0x0000000403037810 */
/* 0x000fe20007ffe0ff */
/*0590*/ STG.E [R10.64+0x8], RZ ; /* 0x000008ff0a007986 */
/* 0x000fe8000c101904 */
/*05a0*/ STG.E [R10.64+0xc], RZ ; /* 0x00000cff0a007986 */
/* 0x0001e4000c101904 */
/*05b0*/ MOV R10, R5 ; /* 0x00000005000a7202 */
/* 0x001fc40000000f00 */
/*05c0*/ MOV R11, R12 ; /* 0x0000000c000b7202 */
/* 0x000fe20000000f00 */
/*05d0*/ @P0 BRA 0x520 ; /* 0xffffff4000000947 */
/* 0x000fea000383ffff */
/*05e0*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fda0003f05270 */
/*05f0*/ @!P0 BRA 0x1310 ; /* 0x00000d1000008947 */
/* 0x000fea0003800000 */
/*0600*/ IADD3 R2, R2, R3, RZ ; /* 0x0000000302027210 */
/* 0x000fe20007ffe0ff */
/*0610*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0620*/ ISETP.NE.AND P0, PT, R8, 0x1, PT ; /* 0x000000010800780c */
/* 0x000fd20003f05270 */
/*0630*/ IMAD.WIDE R2, R2, R3, c[0x0][0x170] ; /* 0x00005c0002027625 */
/* 0x000fca00078e0203 */
/*0640*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */
/* 0x0001e2000c101904 */
/*0650*/ @!P0 BRA 0x1310 ; /* 0x00000cb000008947 */
/* 0x000fea0003800000 */
/*0660*/ STG.E [R2.64+0x4], RZ ; /* 0x000004ff02007986 */
/* 0x0003e2000c101904 */
/*0670*/ ISETP.NE.AND P0, PT, R8, 0x2, PT ; /* 0x000000020800780c */
/* 0x000fda0003f05270 */
/*0680*/ @!P0 BRA 0x1310 ; /* 0x00000c8000008947 */
/* 0x000fea0003800000 */
/*0690*/ STG.E [R2.64+0x8], RZ ; /* 0x000008ff02007986 */
/* 0x0023e2000c101904 */
/*06a0*/ BRA 0x1310 ; /* 0x00000c6000007947 */
/* 0x000fea0003800000 */
/*06b0*/ MOV R3, RZ ; /* 0x000000ff00037202 */
/* 0x000fce0000000f00 */
/*06c0*/ HFMA2.MMA R0, -RZ, RZ, 0, 0 ; /* 0x00000000ff007435 */
/* 0x000fe200000001ff */
/*06d0*/ MOV R14, RZ ; /* 0x000000ff000e7202 */
/* 0x001fe20000000f00 */
/*06e0*/ @!P2 BRA 0x1130 ; /* 0x00000a400000a947 */
/* 0x000fea0003800000 */
/*06f0*/ ISETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe40003f04270 */
/*0700*/ MOV R0, RZ ; /* 0x000000ff00007202 */
/* 0x000fe40000000f00 */
/*0710*/ MOV R14, RZ ; /* 0x000000ff000e7202 */
/* 0x000fe40000000f00 */
/*0720*/ MOV R10, R6 ; /* 0x00000006000a7202 */
/* 0x000fce0000000f00 */
/*0730*/ @!P0 BRA 0xfb0 ; /* 0x0000087000008947 */
/* 0x000fea0003800000 */
/*0740*/ ISETP.GT.AND P4, PT, R10, 0xc, PT ; /* 0x0000000c0a00780c */
/* 0x000fe40003f84270 */
/*0750*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*0760*/ @!P4 BRA 0xcc0 ; /* 0x000005500000c947 */
/* 0x000fea0003800000 */
/*0770*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0780*/ HFMA2.MMA R11, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0b7435 */
/* 0x000fe200000001ff */
/*0790*/ IMAD R24, R0, c[0x0][0x17c], R3 ; /* 0x00005f0000187a24 */
/* 0x000fe400078e0203 */
/*07a0*/ IMAD R28, R5, c[0x0][0x180], R0 ; /* 0x00006000051c7a24 */
/* 0x000fce00078e0200 */
/*07b0*/ IMAD.WIDE R24, R24, R11, c[0x0][0x168] ; /* 0x00005a0018187625 */
/* 0x000fc800078e020b */
/*07c0*/ IMAD.WIDE R28, R28, R11, c[0x0][0x160] ; /* 0x000058001c1c7625 */
/* 0x000fe200078e020b */
/*07d0*/ LDG.E R15, [R24.64] ; /* 0x00000004180f7981 */
/* 0x0000a6000c1e1900 */
/*07e0*/ IMAD.WIDE R20, R11.reuse, c[0x0][0x17c], R24 ; /* 0x00005f000b147a25 */
/* 0x040fe200078e0218 */
/*07f0*/ LDG.E R27, [R28.64] ; /* 0x000000041c1b7981 */
/* 0x0002a8000c1e1900 */
/*0800*/ LDG.E R16, [R20.64] ; /* 0x0000000414107981 */
/* 0x000722000c1e1900 */
/*0810*/ IMAD.WIDE R12, R11, c[0x0][0x17c], R20 ; /* 0x00005f000b0c7a25 */
/* 0x000fc600078e0214 */
/*0820*/ LDG.E R17, [R28.64+0x4] ; /* 0x000004041c117981 */
/* 0x000322000c1e1900 */
/*0830*/ IADD3 R26, R0, 0x4, RZ ; /* 0x00000004001a7810 */
/* 0x000fc60007ffe0ff */
/*0840*/ LDG.E R18, [R28.64+0x8] ; /* 0x000008041c127981 */
/* 0x000368000c1e1900 */
/*0850*/ LDG.E R19, [R12.64] ; /* 0x000000040c137981 */
/* 0x000362000c1e1900 */
/*0860*/ IMAD.WIDE R22, R11, c[0x0][0x17c], R12 ; /* 0x00005f000b167a25 */
/* 0x000fc600078e020c */
/*0870*/ LDG.E R24, [R28.64+0xc] ; /* 0x00000c041c187981 */
/* 0x001168000c1e1900 */
/*0880*/ LDG.E R23, [R22.64] ; /* 0x0000000416177981 */
/* 0x000162000c1e1900 */
/*0890*/ IMAD R12, R5, c[0x0][0x180], R26 ; /* 0x00006000050c7a24 */
/* 0x002fe400078e021a */
/*08a0*/ IMAD R26, R26, c[0x0][0x17c], R3 ; /* 0x00005f001a1a7a24 */
/* 0x000fe400078e0203 */
/*08b0*/ IMAD.WIDE R12, R12, R11, c[0x0][0x160] ; /* 0x000058000c0c7625 */
/* 0x000fc800078e020b */
/*08c0*/ IMAD.WIDE R20, R26, R11, c[0x0][0x168] ; /* 0x00005a001a147625 */
/* 0x008fe200078e020b */
/*08d0*/ LDG.E R25, [R12.64] ; /* 0x000000040c197981 */
/* 0x0002e8000c1e1900 */
/*08e0*/ LDG.E R26, [R20.64] ; /* 0x00000004141a7981 */
/* 0x0008e8000c1e1900 */
/*08f0*/ LDG.E R22, [R12.64+0x4] ; /* 0x000004040c167981 */
/* 0x0012e8000c1e1900 */
/*0900*/ LDG.E R29, [R12.64+0x8] ; /* 0x000008040c1d7981 */
/* 0x0002e8000c1e1900 */
/*0910*/ LDG.E R12, [R12.64+0xc] ; /* 0x00000c040c0c7981 */
/* 0x0020e2000c1e1900 */
/*0920*/ IMAD R27, R15, R27, R14 ; /* 0x0000001b0f1b7224 */
/* 0x004fc400078e020e */
/*0930*/ IMAD.WIDE R14, R11, c[0x0][0x17c], R20 ; /* 0x00005f000b0e7a25 */
/* 0x000fc800078e0214 */
/*0940*/ IMAD R20, R16, R17, R27 ; /* 0x0000001110147224 */
/* 0x010fe400078e021b */
/*0950*/ IMAD.WIDE R16, R11, c[0x0][0x17c], R14 ; /* 0x00005f000b107a25 */
/* 0x000fe200078e020e */
/*0960*/ LDG.E R27, [R14.64] ; /* 0x000000040e1b7981 */
/* 0x000aa8000c1e1900 */
/*0970*/ LDG.E R28, [R16.64] ; /* 0x00000004101c7981 */
/* 0x000322000c1e1900 */
/*0980*/ IMAD R14, R19, R18, R20 ; /* 0x00000012130e7224 */
/* 0x020fe400078e0214 */
/*0990*/ IMAD.WIDE R18, R11, c[0x0][0x17c], R16 ; /* 0x00005f000b127a25 */
/* 0x000fe200078e0210 */
/*09a0*/ IADD3 R16, R0, 0x8, RZ ; /* 0x0000000800107810 */
/* 0x002fc60007ffe0ff */
/*09b0*/ IMAD R23, R23, R24, R14 ; /* 0x0000001817177224 */
/* 0x000fe200078e020e */
/*09c0*/ LDG.E R13, [R18.64] ; /* 0x00000004120d7981 */
/* 0x001762000c1e1900 */
/*09d0*/ IMAD R20, R5, c[0x0][0x180], R16 ; /* 0x0000600005147a24 */
/* 0x000fe400078e0210 */
/*09e0*/ IMAD R16, R16, c[0x0][0x17c], R3 ; /* 0x00005f0010107a24 */
/* 0x000fe400078e0203 */
/*09f0*/ IMAD.WIDE R20, R20, R11, c[0x0][0x160] ; /* 0x0000580014147625 */
/* 0x000fc800078e020b */
/*0a00*/ IMAD.WIDE R14, R16, R11, c[0x0][0x168] ; /* 0x00005a00100e7625 */
/* 0x000fc800078e020b */
/*0a10*/ IMAD R18, R26, R25, R23 ; /* 0x000000191a127224 */
/* 0x008fe200078e0217 */
/*0a20*/ LDG.E R24, [R14.64] ; /* 0x000000040e187981 */
/* 0x0000e2000c1e1900 */
/*0a30*/ IMAD.WIDE R16, R11, c[0x0][0x17c], R14 ; /* 0x00005f000b107a25 */
/* 0x000fc600078e020e */
/*0a40*/ LDG.E R23, [R20.64] ; /* 0x0000000414177981 */
/* 0x000ee8000c1e1900 */
/*0a50*/ LDG.E R25, [R20.64+0x4] ; /* 0x0000040414197981 */
/* 0x0002e8000c1e1900 */
/*0a60*/ LDG.E R26, [R16.64] ; /* 0x00000004101a7981 */
/* 0x0000e2000c1e1900 */
/*0a70*/ IMAD R18, R27, R22, R18 ; /* 0x000000161b127224 */
/* 0x004fc600078e0212 */
/*0a80*/ LDG.E R22, [R20.64+0x8] ; /* 0x0000080414167981 */
/* 0x0002a2000c1e1900 */
/*0a90*/ IMAD R29, R28, R29, R18 ; /* 0x0000001d1c1d7224 */
/* 0x010fe200078e0212 */
/*0aa0*/ IADD3 R28, R0, 0xc, RZ ; /* 0x0000000c001c7810 */
/* 0x000fe20007ffe0ff */
/*0ab0*/ IMAD.WIDE R18, R11, c[0x0][0x17c], R16 ; /* 0x00005f000b127a25 */
/* 0x000fe200078e0210 */
/*0ac0*/ LDG.E R27, [R20.64+0xc] ; /* 0x00000c04141b7981 */
/* 0x000326000c1e1900 */
/*0ad0*/ IMAD R14, R28, c[0x0][0x17c], R3 ; /* 0x00005f001c0e7a24 */
/* 0x001fc800078e0203 */
/*0ae0*/ IMAD.WIDE R14, R14, R11, c[0x0][0x168] ; /* 0x00005a000e0e7625 */
/* 0x000fc800078e020b */
/*0af0*/ IMAD R29, R13, R12, R29 ; /* 0x0000000c0d1d7224 */
/* 0x020fe400078e021d */
/*0b00*/ IMAD.WIDE R12, R11, c[0x0][0x17c], R18 ; /* 0x00005f000b0c7a25 */
/* 0x000fe400078e0212 */
/*0b10*/ LDG.E R19, [R18.64] ; /* 0x0000000412137981 */
/* 0x0000a4000c1e1900 */
/*0b20*/ IMAD R28, R5, c[0x0][0x180], R28 ; /* 0x00006000051c7a24 */
/* 0x000fe400078e021c */
/*0b30*/ IMAD.WIDE R16, R11, c[0x0][0x17c], R14 ; /* 0x00005f000b107a25 */
/* 0x000fe400078e020e */
/*0b40*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000b28000c1e1900 */
/*0b50*/ LDG.E R18, [R12.64] ; /* 0x000000040c127981 */
/* 0x001122000c1e1900 */
/*0b60*/ IMAD R23, R24, R23, R29 ; /* 0x0000001718177224 */
/* 0x008fc400078e021d */
/*0b70*/ IMAD.WIDE R28, R28, R11, c[0x0][0x160] ; /* 0x000058001c1c7625 */
/* 0x000fc800078e020b */
/*0b80*/ IMAD.WIDE R20, R11.reuse, c[0x0][0x17c], R16 ; /* 0x00005f000b147a25 */
/* 0x042fe200078e0210 */
/*0b90*/ LDG.E R15, [R28.64+0xc] ; /* 0x00000c041c0f7981 */
/* 0x020ee6000c1e1900 */
/*0ba0*/ IMAD R25, R26, R25, R23 ; /* 0x000000191a197224 */
/* 0x000fe200078e0217 */
/*0bb0*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000f68000c1e1900 */
/*0bc0*/ LDG.E R23, [R28.64] ; /* 0x000000041c177981 */
/* 0x000ee2000c1e1900 */
/*0bd0*/ IMAD.WIDE R12, R11, c[0x0][0x17c], R20 ; /* 0x00005f000b0c7a25 */
/* 0x001fc600078e0214 */
/*0be0*/ LDG.E R26, [R28.64+0x4] ; /* 0x000004041c1a7981 */
/* 0x000f68000c1e1900 */
/*0bf0*/ LDG.E R24, [R20.64] ; /* 0x0000000414187981 */
/* 0x000f68000c1e1900 */
/*0c00*/ LDG.E R11, [R28.64+0x8] ; /* 0x000008041c0b7981 */
/* 0x000f68000c1e1900 */
/*0c10*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x000f62000c1e1900 */
/*0c20*/ IADD3 R10, R10, -0x10, RZ ; /* 0xfffffff00a0a7810 */
/* 0x000fc80007ffe0ff */
/*0c30*/ ISETP.GT.AND P4, PT, R10, 0xc, PT ; /* 0x0000000c0a00780c */
/* 0x000fe40003f84270 */
/*0c40*/ IADD3 R0, R0, 0x10, RZ ; /* 0x0000001000007810 */
/* 0x000fe20007ffe0ff */
/*0c50*/ IMAD R19, R19, R22, R25 ; /* 0x0000001613137224 */
/* 0x004fc800078e0219 */
/*0c60*/ IMAD R18, R18, R27, R19 ; /* 0x0000001b12127224 */
/* 0x010fc800078e0213 */
/*0c70*/ IMAD R23, R14, R23, R18 ; /* 0x000000170e177224 */
/* 0x008fc800078e0212 */
/*0c80*/ IMAD R16, R16, R26, R23 ; /* 0x0000001a10107224 */
/* 0x020fc800078e0217 */
/*0c90*/ IMAD R16, R24, R11, R16 ; /* 0x0000000b18107224 */
/* 0x000fc800078e0210 */
/*0ca0*/ IMAD R14, R12, R15, R16 ; /* 0x0000000f0c0e7224 */
/* 0x000fe200078e0210 */
/*0cb0*/ @P4 BRA 0x780 ; /* 0xfffffac000004947 */
/* 0x000fea000383ffff */
/*0cc0*/ ISETP.GT.AND P4, PT, R10, 0x4, PT ; /* 0x000000040a00780c */
/* 0x000fda0003f84270 */
/*0cd0*/ @!P4 BRA 0xf90 ; /* 0x000002b00000c947 */
/* 0x000fea0003800000 */
/*0ce0*/ MOV R25, 0x4 ; /* 0x0000000400197802 */
/* 0x000fe20000000f00 */
/*0cf0*/ IMAD R18, R5, c[0x0][0x180], R0 ; /* 0x0000600005127a24 */
/* 0x000fe400078e0200 */
/*0d00*/ IMAD R20, R0, c[0x0][0x17c], R3 ; /* 0x00005f0000147a24 */
/* 0x000fe400078e0203 */
/*0d10*/ IMAD.WIDE R18, R18, R25, c[0x0][0x160] ; /* 0x0000580012127625 */
/* 0x000fc800078e0219 */
/*0d20*/ IMAD.WIDE R20, R20, R25, c[0x0][0x168] ; /* 0x00005a0014147625 */
/* 0x000fe200078e0219 */
/*0d30*/ LDG.E R29, [R18.64] ; /* 0x00000004121d7981 */
/* 0x000ea8000c1e1900 */
/*0d40*/ LDG.E R15, [R20.64] ; /* 0x00000004140f7981 */
/* 0x0000a2000c1e1900 */
/*0d50*/ IMAD.WIDE R22, R25, c[0x0][0x17c], R20 ; /* 0x00005f0019167a25 */
/* 0x000fc600078e0214 */
/*0d60*/ LDG.E R11, [R18.64+0x4] ; /* 0x00000404120b7981 */
/* 0x000ee8000c1e1900 */
/*0d70*/ LDG.E R24, [R22.64] ; /* 0x0000000416187981 */
/* 0x0002e2000c1e1900 */
/*0d80*/ IADD3 R28, R0, 0x4, RZ ; /* 0x00000004001c7810 */
/* 0x000fe20007ffe0ff */
/*0d90*/ IMAD.WIDE R12, R25, c[0x0][0x17c], R22 ; /* 0x00005f00190c7a25 */
/* 0x000fe400078e0216 */
/*0da0*/ LDG.E R26, [R18.64+0x8] ; /* 0x00000804121a7981 */
/* 0x000964000c1e1900 */
/*0db0*/ IMAD R20, R28, c[0x0][0x17c], R3 ; /* 0x00005f001c147a24 */
/* 0x001fc400078e0203 */
/*0dc0*/ IMAD.WIDE R16, R25, c[0x0][0x17c], R12 ; /* 0x00005f0019107a25 */
/* 0x000fe200078e020c */
/*0dd0*/ LDG.E R27, [R18.64+0xc] ; /* 0x00000c04121b7981 */
/* 0x000966000c1e1900 */
/*0de0*/ IMAD.WIDE R20, R20, R25.reuse, c[0x0][0x168] ; /* 0x00005a0014147625 */
/* 0x080fe200078e0219 */
/*0df0*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */
/* 0x000166000c1e1900 */
/*0e00*/ IMAD R28, R5, c[0x0][0x180], R28 ; /* 0x00006000051c7a24 */
/* 0x000fe200078e021c */
/*0e10*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000166000c1e1900 */
/*0e20*/ IMAD.WIDE R22, R28, R25, c[0x0][0x160] ; /* 0x000058001c167625 */
/* 0x002fca00078e0219 */
/*0e30*/ LDG.E R28, [R22.64+0x4] ; /* 0x00000404161c7981 */
/* 0x000f68000c1e1900 */
/*0e40*/ LDG.E R17, [R22.64+0x8] ; /* 0x0000080416117981 */
/* 0x001f62000c1e1900 */
/*0e50*/ IMAD R29, R15, R29, R14 ; /* 0x0000001d0f1d7224 */
/* 0x004fe400078e020e */
/*0e60*/ IMAD.WIDE R14, R25, c[0x0][0x17c], R20 ; /* 0x00005f00190e7a25 */
/* 0x000fe400078e0214 */
/*0e70*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000ea4000c1e1900 */
/*0e80*/ IMAD R11, R24, R11, R29 ; /* 0x0000000b180b7224 */
/* 0x008fc400078e021d */
/*0e90*/ LDG.E R29, [R22.64] ; /* 0x00000004161d7981 */
/* 0x000ea2000c1e1900 */
/*0ea0*/ IMAD.WIDE R18, R25, c[0x0][0x17c], R14 ; /* 0x00005f0019127a25 */
/* 0x010fc600078e020e */
/*0eb0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x0000e6000c1e1900 */
/*0ec0*/ IMAD.WIDE R24, R25, c[0x0][0x17c], R18 ; /* 0x00005f0019187a25 */
/* 0x000fe200078e0212 */
/*0ed0*/ LDG.E R12, [R18.64] ; /* 0x00000004120c7981 */
/* 0x000f2a000c1e1900 */
/*0ee0*/ LDG.E R24, [R24.64] ; /* 0x0000000418187981 */
/* 0x000f28000c1e1900 */
/*0ef0*/ LDG.E R15, [R22.64+0xc] ; /* 0x00000c04160f7981 */
/* 0x001f22000c1e1900 */
/*0f00*/ IMAD R11, R13, R26, R11 ; /* 0x0000001a0d0b7224 */
/* 0x020fc800078e020b */
/*0f10*/ IMAD R11, R16, R27, R11 ; /* 0x0000001b100b7224 */
/* 0x000fe200078e020b */
/*0f20*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0f30*/ IADD3 R10, R10, -0x8, RZ ; /* 0xfffffff80a0a7810 */
/* 0x000fe40007ffe0ff */
/*0f40*/ IADD3 R0, R0, 0x8, RZ ; /* 0x0000000800007810 */
/* 0x000fe20007ffe0ff */
/*0f50*/ IMAD R11, R20, R29, R11 ; /* 0x0000001d140b7224 */
/* 0x004fc800078e020b */
/*0f60*/ IMAD R11, R14, R28, R11 ; /* 0x0000001c0e0b7224 */
/* 0x008fc800078e020b */
/*0f70*/ IMAD R12, R12, R17, R11 ; /* 0x000000110c0c7224 */
/* 0x010fc800078e020b */
/*0f80*/ IMAD R14, R24, R15, R12 ; /* 0x0000000f180e7224 */
/* 0x000fe400078e020c */
/*0f90*/ ISETP.NE.OR P0, PT, R10, RZ, P0 ; /* 0x000000ff0a00720c */
/* 0x000fda0000705670 */
/*0fa0*/ @!P0 BRA 0x1130 ; /* 0x0000018000008947 */
/* 0x000fea0003800000 */
/*0fb0*/ HFMA2.MMA R21, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff157435 */
/* 0x000fe200000001ff */
/*0fc0*/ IMAD R22, R0, c[0x0][0x17c], R3 ; /* 0x00005f0000167a24 */
/* 0x000fe400078e0203 */
/*0fd0*/ IMAD R12, R5, c[0x0][0x180], R0 ; /* 0x00006000050c7a24 */
/* 0x000fce00078e0200 */
/*0fe0*/ IMAD.WIDE R22, R22, R21, c[0x0][0x168] ; /* 0x00005a0016167625 */
/* 0x000fc800078e0215 */
/*0ff0*/ IMAD.WIDE R12, R12, R21, c[0x0][0x160] ; /* 0x000058000c0c7625 */
/* 0x000fc800078e0215 */
/*1000*/ IMAD.WIDE R16, R21.reuse, c[0x0][0x17c], R22 ; /* 0x00005f0015107a25 */
/* 0x040fe200078e0216 */
/*1010*/ LDG.E R11, [R12.64] ; /* 0x000000040c0b7981 */
/* 0x000ea8000c1e1900 */
/*1020*/ LDG.E R23, [R22.64] ; /* 0x0000000416177981 */
/* 0x000ea2000c1e1900 */
/*1030*/ IMAD.WIDE R18, R21, c[0x0][0x17c], R16 ; /* 0x00005f0015127a25 */
/* 0x000fc600078e0210 */
/*1040*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000ee8000c1e1900 */
/*1050*/ LDG.E R15, [R12.64+0x4] ; /* 0x000004040c0f7981 */
/* 0x000ee2000c1e1900 */
/*1060*/ IMAD.WIDE R20, R21, c[0x0][0x17c], R18 ; /* 0x00005f0015147a25 */
/* 0x000fc600078e0212 */
/*1070*/ LDG.E R24, [R18.64] ; /* 0x0000000412187981 */
/* 0x000f28000c1e1900 */
/*1080*/ LDG.E R25, [R12.64+0x8] ; /* 0x000008040c197981 */
/* 0x000f28000c1e1900 */
/*1090*/ LDG.E R26, [R12.64+0xc] ; /* 0x00000c040c1a7981 */
/* 0x000f68000c1e1900 */
/*10a0*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000f62000c1e1900 */
/*10b0*/ IADD3 R10, R10, -0x4, RZ ; /* 0xfffffffc0a0a7810 */
/* 0x000fc80007ffe0ff */
/*10c0*/ ISETP.NE.AND P0, PT, R10, RZ, PT ; /* 0x000000ff0a00720c */
/* 0x000fe40003f05270 */
/*10d0*/ IADD3 R0, R0, 0x4, RZ ; /* 0x0000000400007810 */
/* 0x000fe20007ffe0ff */
/*10e0*/ IMAD R11, R23, R11, R14 ; /* 0x0000000b170b7224 */
/* 0x004fc800078e020e */
/*10f0*/ IMAD R11, R16, R15, R11 ; /* 0x0000000f100b7224 */
/* 0x008fc800078e020b */
/*1100*/ IMAD R11, R24, R25, R11 ; /* 0x00000019180b7224 */
/* 0x010fc800078e020b */
/*1110*/ IMAD R14, R20, R26, R11 ; /* 0x0000001a140e7224 */
/* 0x020fe200078e020b */
/*1120*/ @P0 BRA 0xfb0 ; /* 0xfffffe8000000947 */
/* 0x000fea000383ffff */
/*1130*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fda0003f05270 */
/*1140*/ @!P0 BRA 0x12a0 ; /* 0x0000015000008947 */
/* 0x000fea0003800000 */
/*1150*/ MOV R18, 0x4 ; /* 0x0000000400127802 */
/* 0x000fe20000000f00 */
/*1160*/ IMAD R15, R0, c[0x0][0x17c], R3 ; /* 0x00005f00000f7a24 */
/* 0x000fe400078e0203 */
/*1170*/ IMAD R10, R5, c[0x0][0x180], R0 ; /* 0x00006000050a7a24 */
/* 0x000fe400078e0200 */
/*1180*/ IMAD.WIDE R12, R15, R18, c[0x0][0x168] ; /* 0x00005a000f0c7625 */
/* 0x000fc800078e0212 */
/*1190*/ IMAD.WIDE R10, R10, R18, c[0x0][0x160] ; /* 0x000058000a0a7625 */
/* 0x000fe400078e0212 */
/*11a0*/ LDG.E R13, [R12.64] ; /* 0x000000040c0d7981 */
/* 0x000ea8000c1e1900 */
/*11b0*/ LDG.E R0, [R10.64] ; /* 0x000000040a007981 */
/* 0x000ea2000c1e1900 */
/*11c0*/ ISETP.NE.AND P0, PT, R9, 0x1, PT ; /* 0x000000010900780c */
/* 0x000fe20003f05270 */
/*11d0*/ IMAD R14, R13, R0, R14 ; /* 0x000000000d0e7224 */
/* 0x004fd800078e020e */
/*11e0*/ @!P0 BRA 0x12a0 ; /* 0x000000b000008947 */
/* 0x000fea0003800000 */
/*11f0*/ ISETP.NE.AND P0, PT, R9, 0x2, PT ; /* 0x000000020900780c */
/* 0x000fe20003f05270 */
/*1200*/ LDG.E R0, [R10.64+0x4] ; /* 0x000004040a007981 */
/* 0x000ea2000c1e1900 */
/*1210*/ IADD3 R15, R15, c[0x0][0x17c], RZ ; /* 0x00005f000f0f7a10 */
/* 0x000fca0007ffe0ff */
/*1220*/ IMAD.WIDE R16, R15, R18, c[0x0][0x168] ; /* 0x00005a000f107625 */
/* 0x000fcc00078e0212 */
/*1230*/ @P0 IADD3 R12, R15, c[0x0][0x17c], RZ ; /* 0x00005f000f0c0a10 */
/* 0x000fe20007ffe0ff */
/*1240*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000ea8000c1e1900 */
/*1250*/ @P0 IMAD.WIDE R12, R12, R18, c[0x0][0x168] ; /* 0x00005a000c0c0625 */
/* 0x000fe200078e0212 */
/*1260*/ @P0 LDG.E R15, [R10.64+0x8] ; /* 0x000008040a0f0981 */
/* 0x000eea000c1e1900 */
/*1270*/ @P0 LDG.E R13, [R12.64] ; /* 0x000000040c0d0981 */
/* 0x000ee2000c1e1900 */
/*1280*/ IMAD R14, R17, R0, R14 ; /* 0x00000000110e7224 */
/* 0x004fc800078e020e */
/*1290*/ @P0 IMAD R14, R13, R15, R14 ; /* 0x0000000f0d0e0224 */
/* 0x008fe400078e020e */
/*12a0*/ HFMA2.MMA R11, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0b7435 */
/* 0x000fe200000001ff */
/*12b0*/ IADD3 R10, R2, R3, RZ ; /* 0x00000003020a7210 */
/* 0x000fe40007ffe0ff */
/*12c0*/ IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103037810 */
/* 0x000fc80007ffe0ff */
/*12d0*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x17c], PT ; /* 0x00005f0003007a0c */
/* 0x000fc60003f06270 */
/*12e0*/ IMAD.WIDE R10, R10, R11, c[0x0][0x170] ; /* 0x00005c000a0a7625 */
/* 0x000fca00078e020b */
/*12f0*/ STG.E [R10.64], R14 ; /* 0x0000000e0a007986 */
/* 0x0001ea000c101904 */
/*1300*/ @!P0 BRA 0x6c0 ; /* 0xfffff3b000008947 */
/* 0x000fea000383ffff */
/*1310*/ @P3 CALL.REL.NOINC 0x1330 ; /* 0x0000001000003944 */
/* 0x002fe20003c00000 */
/*1320*/ BRA 0x140 ; /* 0xffffee1000007947 */
/* 0x000fea000383ffff */
/*1330*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*1340*/ BRA 0x1340; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*1350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1380*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*1390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*13a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*13b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*13c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*13d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*13e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*13f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
/*
Vector addition with a single thread for each addition
*/
/*
Vector addition with thread mapping, where each thread also accesses its neighbor's element in parallel
*/
// slower than the simpler one-thread-per-element version
/*
Matrix Matrix multiplication with a single thread for each row
*/
/*
Matrix Matrix multiplication with a single thread for each result element
*/
/*
Matrix Vector multiplication with a block with 4 threads per block, shared block mem and parallel reduce
*/
// Naive matrix-matrix multiply: c = a * b, all stored row-major in flat int arrays.
// Index expressions imply a is n_row x n_comm, b is n_comm x n_col, c is n_row x n_col.
// One thread computes one (or more) entire rows of c.
__global__ void matrix_matrix_mul_old(int *a, int *b, int *c, int n_row, int n_col, int n_comm)
{
// Global 1-D thread id; serves as the row index of c handled by this thread.
int tid= threadIdx.x + blockIdx.x * blockDim.x;
int temp=0;
while(tid<n_row)
{
// For each column k of c, accumulate the dot product of row tid of a
// with column k of b into temp, then store it.
for (int k=0;k<n_col;k++)
{
temp=0;
for(int j=0;j<n_comm;j++)
{
temp+= a[n_comm*tid+j]* b[j*n_col+k];
}
c[tid*n_col+k]=temp;
}
// Advance by the total thread count in the grid (grid-stride loop over rows),
// so the kernel handles n_row larger than the launched thread count.
tid+=blockDim.x * gridDim.x;
}
}
.text
#APP
#NO_APP
# static void __cudaUnregisterBinaryUtil(void)
# atexit handler: passes the fatbin handle saved at registration time to
# __cudaUnregisterFatBinary. SysV AMD64; sub/add $8 keeps rsp 16-aligned at the call.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi # arg0 = saved fatbin handle
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# Device stub for matrix_matrix_mul_old(int*,int*,int*,int,int,int).
# Spills the six register arguments (rdi,rsi,rdx,ecx,r8d,r9d per SysV AMD64) to the
# stack, builds the void* kernel-argument array expected by cudaLaunchKernel, pops
# the <<<...>>> launch configuration, and launches the kernel. Guarded by a
# stack-protector canary loaded from %fs:40.
.globl _Z48__device_stub__Z21matrix_matrix_mul_oldPiS_S_iiiPiS_S_iii
.type _Z48__device_stub__Z21matrix_matrix_mul_oldPiS_S_iiiPiS_S_iii, @function
_Z48__device_stub__Z21matrix_matrix_mul_oldPiS_S_iiiPiS_S_iii:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
# Spill the six kernel arguments to fixed stack slots.
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
# Install the stack-protector canary at 168(%rsp).
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
# Fill the kernel-argument pointer array at 112(%rsp)..152(%rsp)
# with the addresses of the six spilled arguments.
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
# Default-initialize grid/block dim structs to (1,1,1).
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
# Retrieve the launch configuration pushed by the <<<...>>> call site.
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
# Verify the stack canary before returning.
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
# Pass stream and shared-mem size as stack args, then launch the kernel.
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9 # r9 = kernel-argument pointer array
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z21matrix_matrix_mul_oldPiS_S_iii(%rip), %rdi # rdi = host handle for the kernel
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT # canary mismatch: abort
.cfi_endproc
.LFE2051:
.size _Z48__device_stub__Z21matrix_matrix_mul_oldPiS_S_iiiPiS_S_iii, .-_Z48__device_stub__Z21matrix_matrix_mul_oldPiS_S_iiiPiS_S_iii
# Host-side entry for the kernel symbol. Its address is what gets registered with
# the CUDA runtime (see __cudaRegisterFunction below); calling it simply forwards
# the still-live register arguments to the device stub.
.globl _Z21matrix_matrix_mul_oldPiS_S_iii
.type _Z21matrix_matrix_mul_oldPiS_S_iii, @function
_Z21matrix_matrix_mul_oldPiS_S_iii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z48__device_stub__Z21matrix_matrix_mul_oldPiS_S_iiiPiS_S_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z21matrix_matrix_mul_oldPiS_S_iii, .-_Z21matrix_matrix_mul_oldPiS_S_iii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z21matrix_matrix_mul_oldPiS_S_iii"
.text
# Module constructor (referenced from .init_array): registers the embedded fat
# binary with the CUDA runtime, registers the kernel under its mangled name
# (.LC0), finalizes registration, and installs the atexit unregister hook.
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi # arg0 = fatbin wrapper descriptor
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip) # save handle for later unregister
# Four zeroed stack args (thread-limit / dim / size pointers) for
# __cudaRegisterFunction's tail parameters.
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d # -1 = default thread limit
leaq .LC0(%rip), %rdx # device function name string
movq %rdx, %rcx
leaq _Z21matrix_matrix_mul_oldPiS_S_iii(%rip), %rsi # host function address used as handle
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT # unregister on normal process exit
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
/*
Vector addition with a single thread for each addition
*/
/*
Vector addition with thread mapping and thread accessing its neighbor parallely
*/
//slower than simpler
/*
Matrix Matrix multiplication with a single thread for each row
*/
/*
Matrix Matrix multiplication with a single thread for each result element
*/
/*
Matrix Vector multiplication with a block with 4 threads per block, shared block mem and parallel reduce
*/
__global__ void matrix_matrix_mul_old(int *a, int *b, int *c, int n_row, int n_col, int n_comm)
{
int tid= threadIdx.x + blockIdx.x * blockDim.x;
int temp=0;
while(tid<n_row)
{
for (int k=0;k<n_col;k++)
{
temp=0;
for(int j=0;j<n_comm;j++)
{
temp+= a[n_comm*tid+j]* b[j*n_col+k];
}
c[tid*n_col+k]=temp;
}
tid+=blockDim.x * gridDim.x;
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
/*
Vector addition with a single thread for each addition
*/
/*
Vector addition with thread mapping and thread accessing its neighbor parallely
*/
//slower than simpler
/*
Matrix Matrix multiplication with a single thread for each row
*/
/*
Matrix Matrix multiplication with a single thread for each result element
*/
/*
Matrix Vector multiplication with a block with 4 threads per block, shared block mem and parallel reduce
*/
__global__ void matrix_matrix_mul_old(int *a, int *b, int *c, int n_row, int n_col, int n_comm)
{
int tid= threadIdx.x + blockIdx.x * blockDim.x;
int temp=0;
while(tid<n_row)
{
for (int k=0;k<n_col;k++)
{
temp=0;
for(int j=0;j<n_comm;j++)
{
temp+= a[n_comm*tid+j]* b[j*n_col+k];
}
c[tid*n_col+k]=temp;
}
tid+=blockDim.x * gridDim.x;
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/*
Vector addition with a single thread for each addition
*/
/*
Vector addition with thread mapping and thread accessing its neighbor parallely
*/
//slower than simpler
/*
Matrix Matrix multiplication with a single thread for each row
*/
/*
Matrix Matrix multiplication with a single thread for each result element
*/
/*
Matrix Vector multiplication with a block with 4 threads per block, shared block mem and parallel reduce
*/
__global__ void matrix_matrix_mul_old(int *a, int *b, int *c, int n_row, int n_col, int n_comm)
{
int tid= threadIdx.x + blockIdx.x * blockDim.x;
int temp=0;
while(tid<n_row)
{
for (int k=0;k<n_col;k++)
{
temp=0;
for(int j=0;j<n_comm;j++)
{
temp+= a[n_comm*tid+j]* b[j*n_col+k];
}
c[tid*n_col+k]=temp;
}
tid+=blockDim.x * gridDim.x;
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z21matrix_matrix_mul_oldPiS_S_iii
.globl _Z21matrix_matrix_mul_oldPiS_S_iii
.p2align 8
.type _Z21matrix_matrix_mul_oldPiS_S_iii,@function
_Z21matrix_matrix_mul_oldPiS_S_iii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x34
s_load_b32 s10, s[0:1], 0x18
s_add_u32 s4, s0, 40
s_addc_u32 s5, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s8, s2, 0xffff
s_mov_b32 s2, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s8, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s10, v1
s_cbranch_execz .LBB0_9
s_load_b64 s[2:3], s[0:1], 0x1c
s_load_b32 s11, s[4:5], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
v_mov_b32_e32 v7, 0
s_mov_b32 s9, 0
s_mov_b32 s15, 0
s_waitcnt lgkmcnt(0)
v_mul_lo_u32 v2, s3, v1
s_cmp_gt_i32 s2, 0
s_mul_i32 s11, s11, s8
s_cselect_b32 s12, -1, 0
s_cmp_gt_i32 s3, 0
s_mul_i32 s14, s11, s3
s_cselect_b32 s13, -1, 0
s_branch .LBB0_3
.LBB0_2:
s_set_inst_prefetch_distance 0x2
v_add_nc_u32_e32 v1, s11, v1
v_add_nc_u32_e32 v2, s14, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_le_i32_e32 vcc_lo, s10, v1
s_or_b32 s15, vcc_lo, s15
s_and_not1_b32 exec_lo, exec_lo, s15
s_cbranch_execz .LBB0_9
.LBB0_3:
s_and_not1_b32 vcc_lo, exec_lo, s12
s_cbranch_vccnz .LBB0_2
v_ashrrev_i32_e32 v3, 31, v2
v_mul_lo_u32 v8, v1, s2
s_mov_b32 s16, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[2:3]
v_add_co_u32 v3, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_6
.p2align 6
.LBB0_5:
v_add_nc_u32_e32 v5, s16, v8
s_add_i32 s16, s16, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_eq_u32 s16, s2
v_ashrrev_i32_e32 v6, 31, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[5:6], 2, v[5:6]
v_add_co_u32 v5, vcc_lo, s0, v5
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v6, vcc_lo, s1, v6, vcc_lo
global_store_b32 v[5:6], v0, off
s_cbranch_scc1 .LBB0_2
.LBB0_6:
v_mov_b32_e32 v0, 0
s_and_not1_b32 vcc_lo, exec_lo, s13
s_cbranch_vccnz .LBB0_5
v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v5, v3
s_mov_b32 s8, s16
s_mov_b32 s17, s3
.p2align 6
.LBB0_8:
s_lshl_b64 s[18:19], s[8:9], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s18, s6, s18
s_addc_u32 s19, s7, s19
global_load_b32 v11, v[5:6], off
global_load_b32 v12, v7, s[18:19]
v_add_co_u32 v5, vcc_lo, v5, 4
v_add_co_ci_u32_e32 v6, vcc_lo, 0, v6, vcc_lo
s_add_i32 s17, s17, -1
s_add_i32 s8, s8, s2
s_cmp_eq_u32 s17, 0
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[9:10], null, v12, v11, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_mov_b32_e32 v0, v9
s_cbranch_scc0 .LBB0_8
s_branch .LBB0_5
.LBB0_9:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z21matrix_matrix_mul_oldPiS_S_iii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 13
.amdhsa_next_free_sgpr 20
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z21matrix_matrix_mul_oldPiS_S_iii, .Lfunc_end0-_Z21matrix_matrix_mul_oldPiS_S_iii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z21matrix_matrix_mul_oldPiS_S_iii
.private_segment_fixed_size: 0
.sgpr_count: 22
.sgpr_spill_count: 0
.symbol: _Z21matrix_matrix_mul_oldPiS_S_iii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 13
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/*
Vector addition with a single thread for each addition
*/
/*
Vector addition with thread mapping and thread accessing its neighbor parallely
*/
//slower than simpler
/*
Matrix Matrix multiplication with a single thread for each row
*/
/*
Matrix Matrix multiplication with a single thread for each result element
*/
/*
Matrix Vector multiplication with a block with 4 threads per block, shared block mem and parallel reduce
*/
__global__ void matrix_matrix_mul_old(int *a, int *b, int *c, int n_row, int n_col, int n_comm)
{
int tid= threadIdx.x + blockIdx.x * blockDim.x;
int temp=0;
while(tid<n_row)
{
for (int k=0;k<n_col;k++)
{
temp=0;
for(int j=0;j<n_comm;j++)
{
temp+= a[n_comm*tid+j]* b[j*n_col+k];
}
c[tid*n_col+k]=temp;
}
tid+=blockDim.x * gridDim.x;
}
} | .text
.file "matrix_matrix_mul_old.hip"
.globl _Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii # -- Begin function _Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii
.p2align 4, 0x90
.type _Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii,@function
_Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii: # @_Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z21matrix_matrix_mul_oldPiS_S_iii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii, .Lfunc_end0-_Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21matrix_matrix_mul_oldPiS_S_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z21matrix_matrix_mul_oldPiS_S_iii,@object # @_Z21matrix_matrix_mul_oldPiS_S_iii
.section .rodata,"a",@progbits
.globl _Z21matrix_matrix_mul_oldPiS_S_iii
.p2align 3, 0x0
_Z21matrix_matrix_mul_oldPiS_S_iii:
.quad _Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii
.size _Z21matrix_matrix_mul_oldPiS_S_iii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z21matrix_matrix_mul_oldPiS_S_iii"
.size .L__unnamed_1, 35
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z21matrix_matrix_mul_oldPiS_S_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000ff1d8_00000000-6_matrix_matrix_mul_old.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z48__device_stub__Z21matrix_matrix_mul_oldPiS_S_iiiPiS_S_iii
.type _Z48__device_stub__Z21matrix_matrix_mul_oldPiS_S_iiiPiS_S_iii, @function
_Z48__device_stub__Z21matrix_matrix_mul_oldPiS_S_iiiPiS_S_iii:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z21matrix_matrix_mul_oldPiS_S_iii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z48__device_stub__Z21matrix_matrix_mul_oldPiS_S_iiiPiS_S_iii, .-_Z48__device_stub__Z21matrix_matrix_mul_oldPiS_S_iiiPiS_S_iii
.globl _Z21matrix_matrix_mul_oldPiS_S_iii
.type _Z21matrix_matrix_mul_oldPiS_S_iii, @function
_Z21matrix_matrix_mul_oldPiS_S_iii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z48__device_stub__Z21matrix_matrix_mul_oldPiS_S_iiiPiS_S_iii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z21matrix_matrix_mul_oldPiS_S_iii, .-_Z21matrix_matrix_mul_oldPiS_S_iii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z21matrix_matrix_mul_oldPiS_S_iii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z21matrix_matrix_mul_oldPiS_S_iii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "matrix_matrix_mul_old.hip"
.globl _Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii # -- Begin function _Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii
.p2align 4, 0x90
.type _Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii,@function
_Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii: # @_Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 20(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 12(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z21matrix_matrix_mul_oldPiS_S_iii, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii, .Lfunc_end0-_Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21matrix_matrix_mul_oldPiS_S_iii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z21matrix_matrix_mul_oldPiS_S_iii,@object # @_Z21matrix_matrix_mul_oldPiS_S_iii
.section .rodata,"a",@progbits
.globl _Z21matrix_matrix_mul_oldPiS_S_iii
.p2align 3, 0x0
_Z21matrix_matrix_mul_oldPiS_S_iii:
.quad _Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii
.size _Z21matrix_matrix_mul_oldPiS_S_iii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z21matrix_matrix_mul_oldPiS_S_iii"
.size .L__unnamed_1, 35
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z36__device_stub__matrix_matrix_mul_oldPiS_S_iii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z21matrix_matrix_mul_oldPiS_S_iii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <unistd.h>
#define CUDA_CHECK_RETURN( value ) { \
cudaError_t _m_cudaStat = value; \
if ( _m_cudaStat != cudaSuccess ) { \
fprintf( stderr, "Error '%s' at line %d in file %s\n", \
cudaGetErrorString( _m_cudaStat ), __LINE__, __FILE__ ); \
exit( 1 ); \
} }
int main()
{
int deviceCount = 0;
CUDA_CHECK_RETURN( cudaGetDeviceCount( &deviceCount ) );
printf( "Device count: %d\n", deviceCount );
//
for( int i = 0; i < deviceCount; i++ )
{
CUDA_CHECK_RETURN( cudaSetDevice( i ) );
cudaDeviceProp deviceProp;
CUDA_CHECK_RETURN( cudaGetDeviceProperties( &deviceProp, i ) );
printf("GPU%d is capable of directly accessing memory from \n", i );
for( int j = 0; j < deviceCount; j++ )
{
if( i == j )
continue;
int accessible;
cudaDeviceCanAccessPeer( &accessible, i, j );
printf( " GPU%d: %s\n", j, accessible ? "yes" : "no" );
}
}
return 0;
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <unistd.h>
#define CUDA_CHECK_RETURN( value ) { \
cudaError_t _m_cudaStat = value; \
if ( _m_cudaStat != cudaSuccess ) { \
fprintf( stderr, "Error '%s' at line %d in file %s\n", \
cudaGetErrorString( _m_cudaStat ), __LINE__, __FILE__ ); \
exit( 1 ); \
} }
int main()
{
int deviceCount = 0;
CUDA_CHECK_RETURN( cudaGetDeviceCount( &deviceCount ) );
printf( "Device count: %d\n", deviceCount );
//
for( int i = 0; i < deviceCount; i++ )
{
CUDA_CHECK_RETURN( cudaSetDevice( i ) );
cudaDeviceProp deviceProp;
CUDA_CHECK_RETURN( cudaGetDeviceProperties( &deviceProp, i ) );
printf("GPU%d is capable of directly accessing memory from \n", i );
for( int j = 0; j < deviceCount; j++ )
{
if( i == j )
continue;
int accessible;
cudaDeviceCanAccessPeer( &accessible, i, j );
printf( " GPU%d: %s\n", j, accessible ? "yes" : "no" );
}
}
return 0;
} | .file "tmpxft_0008d8ae_00000000-6_access.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2073:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2073:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "yes"
.LC1:
.string "no"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "/home/ubuntu/Datasets/stackv2/train-structured/RuslanKutdusov/convexHull/master/access.cu"
.align 8
.LC3:
.string "Error '%s' at line %d in file %s\n"
.section .rodata.str1.1
.LC4:
.string "Device count: %d\n"
.section .rodata.str1.8
.align 8
.LC5:
.string "GPU%d is capable of directly accessing memory from \n"
.section .rodata.str1.1
.LC6:
.string "\tGPU%d: %s\n"
.text
.globl main
.type main, @function
main:
.LFB2070:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $1064, %rsp
.cfi_def_cfa_offset 1120
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
movl $0, 8(%rsp)
leaq 8(%rsp), %rdi
call cudaGetDeviceCount@PLT
testl %eax, %eax
jne .L18
movl 8(%rsp), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 8(%rsp)
jle .L5
movl $0, %ebp
leaq .LC1(%rip), %r15
leaq .LC0(%rip), %r14
jmp .L12
.L18:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC2(%rip), %r9
movl $15, %r8d
leaq .LC3(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L20:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC2(%rip), %r9
movl $21, %r8d
leaq .LC3(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L21:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC2(%rip), %r9
movl $23, %r8d
leaq .LC3(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L19:
movl %ebx, %edx
movl %ebp, %esi
movq %r13, %rdi
call cudaDeviceCanAccessPeer@PLT
cmpl $0, 12(%rsp)
movq %r14, %rcx
cmove %r15, %rcx
movl %ebx, %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L9:
addl $1, %ebx
cmpl %ebx, 8(%rsp)
jle .L8
.L11:
cmpl %ebx, %ebp
jne .L19
jmp .L9
.L8:
addl $1, %ebp
cmpl %ebp, 8(%rsp)
jle .L5
.L12:
movl %ebp, %edi
call cudaSetDevice@PLT
testl %eax, %eax
jne .L20
leaq 16(%rsp), %rdi
movl %ebp, %esi
call cudaGetDeviceProperties_v2@PLT
testl %eax, %eax
jne .L21
movl %ebp, %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 8(%rsp)
jle .L8
movl $0, %ebx
leaq 12(%rsp), %r13
leaq .LC6(%rip), %r12
jmp .L11
.L5:
movq 1048(%rsp), %rax
subq %fs:40, %rax
jne .L22
movl $0, %eax
addq $1064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L22:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2070:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2096:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2096:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.