system
stringclasses 7
values | input
stringlengths 10
94.5k
| output
stringlengths 14
55.5k
|
|---|---|---|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define N 1000000 //Job size = 1M
#define M 128 // Varying Thread block size- 128, 256, 512, 1024
//add kernel
// Element-wise vector add: c[i] = a[i] + b[i] for the first k elements.
// Launched 1-D; the guard handles the partial last block.
__global__ void add(int *a, int *b, int *c, int k)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < k) {
        c[i] = a[i] + b[i];
    }
}
//Random number generator function
// Fill x[0..size-1] with pseudo-random values in [0, N).
void random_ints(int* x, int size)
{
    for (int j = 0; j < size; j++)
        x[j] = rand() % N;
}
// Driver: allocates N ints on host and device, times H2D copies + kernel
// + D2H copy with CUDA events, and prints the elapsed milliseconds.
int main(void)
{
    int *a, *b, *c;       // host buffers
    int *d_a, *d_b, *d_c; // device buffers
    int size = N * sizeof(int);
    // Events bracketing the transfers and the kernel launch.
    cudaEvent_t start, stop;
    float time = 0.0f;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Allocate device memory
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    // Allocate and initialise host memory
    a = (int *)malloc(size); random_ints(a, N);
    b = (int *)malloc(size); random_ints(b, N);
    c = (int *)malloc(size);
    cudaEventRecord(start, 0);
    // Copy host memory to device memory
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // Launch the add kernel: ceil(N/M) blocks of M threads each
    add<<<(N + M - 1) / M, M>>>(d_a, d_b, d_c, N);
    // Copy result back to host (blocks until the kernel finishes)
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // BUG FIX: the original printed 'time' before cudaEventElapsedTime had
    // filled it in, so an uninitialized value was printed. Compute the
    // elapsed time first, then print.
    cudaEventElapsedTime(&time, start, stop);
    printf("GPU Execution Time = %f\n", time);
    printf("Execution Time = %f\n", time);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Cleanup
    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
|
code for sm_80
Function : _Z3addPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e280000002100 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0030*/ IMAD R6, R3, c[0x0][0x0], R6 ; /* 0x0000000003067a24 */
/* 0x001fca00078e0206 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define N 1000000 //Job size = 1M
#define M 128 // Varying Thread block size- 128, 256, 512, 1024
//add kernel
// Element-wise vector add: c[i] = a[i] + b[i] for the first k elements.
// Launched 1-D; the guard handles the partial last block.
__global__ void add(int *a, int *b, int *c, int k)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < k) {
        c[i] = a[i] + b[i];
    }
}
//Random number generator function
// Fill x[0..size-1] with pseudo-random values in [0, N).
void random_ints(int* x, int size)
{
    for (int j = 0; j < size; j++)
        x[j] = rand() % N;
}
// Driver: allocates N ints on host and device, times H2D copies + kernel
// + D2H copy with CUDA events, and prints the elapsed milliseconds.
int main(void)
{
    int *a, *b, *c;       // host buffers
    int *d_a, *d_b, *d_c; // device buffers
    int size = N * sizeof(int);
    // Events bracketing the transfers and the kernel launch.
    cudaEvent_t start, stop;
    float time = 0.0f;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Allocate device memory
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    // Allocate and initialise host memory
    a = (int *)malloc(size); random_ints(a, N);
    b = (int *)malloc(size); random_ints(b, N);
    c = (int *)malloc(size);
    cudaEventRecord(start, 0);
    // Copy host memory to device memory
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // Launch the add kernel: ceil(N/M) blocks of M threads each
    add<<<(N + M - 1) / M, M>>>(d_a, d_b, d_c, N);
    // Copy result back to host (blocks until the kernel finishes)
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // BUG FIX: the original printed 'time' before cudaEventElapsedTime had
    // filled it in, so an uninitialized value was printed. Compute the
    // elapsed time first, then print.
    cudaEventElapsedTime(&time, start, stop);
    printf("GPU Execution Time = %f\n", time);
    printf("Execution Time = %f\n", time);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Cleanup
    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
|
.file "tmpxft_00055e3e_00000000-6_add_GPU.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z11random_intsPii
.type _Z11random_intsPii, @function
_Z11random_intsPii:
.LFB2057:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L5:
call rand@PLT
movslq %eax, %rdx
imulq $1125899907, %rdx, %rdx
sarq $50, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $1000000, %edx, %edx
subl %edx, %eax
movl %eax, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE2057:
.size _Z11random_intsPii, .-_Z11random_intsPii
.globl _Z27__device_stub__Z3addPiS_S_iPiS_S_i
.type _Z27__device_stub__Z3addPiS_S_iPiS_S_i, @function
_Z27__device_stub__Z3addPiS_S_iPiS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z27__device_stub__Z3addPiS_S_iPiS_S_i, .-_Z27__device_stub__Z3addPiS_S_iPiS_S_i
.globl _Z3addPiS_S_i
.type _Z3addPiS_S_i, @function
_Z3addPiS_S_i:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z3addPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z3addPiS_S_i, .-_Z3addPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "GPU Execution Time = %f\n"
.LC1:
.string "Execution Time = %f\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $80, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
leaq 8(%rsp), %rdi
movl $4000000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $4000000, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4000000, %esi
call cudaMalloc@PLT
movl $4000000, %edi
call malloc@PLT
movq %rax, %rbp
movl $1000000, %esi
movq %rax, %rdi
call _Z11random_intsPii
movl $4000000, %edi
call malloc@PLT
movq %rax, %rbx
movl $1000000, %esi
movq %rax, %rdi
call _Z11random_intsPii
movl $4000000, %edi
call malloc@PLT
movq %rax, %r12
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $1, %ecx
movl $4000000, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4000000, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $128, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $7813, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L23
.L20:
pxor %xmm0, %xmm0
cvtss2sd 60(%rsp), %xmm0
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $2, %ecx
movl $4000000, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 60(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
movq 32(%rsp), %rdi
call cudaEventDestroy@PLT
movq 40(%rsp), %rdi
call cudaEventDestroy@PLT
pxor %xmm0, %xmm0
cvtss2sd 60(%rsp), %xmm0
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L24
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
movl $1000000, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z27__device_stub__Z3addPiS_S_iPiS_S_i
jmp .L20
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z3addPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define N 1000000 //Job size = 1M
#define M 128 // Varying Thread block size- 128, 256, 512, 1024
//add kernel
// Element-wise vector add: c[i] = a[i] + b[i] for the first k elements.
// Launched 1-D; the guard handles the partial last block.
__global__ void add(int *a, int *b, int *c, int k)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < k) {
        c[i] = a[i] + b[i];
    }
}
//Random number generator function
// Fill x[0..size-1] with pseudo-random values in [0, N).
void random_ints(int* x, int size)
{
    for (int j = 0; j < size; j++)
        x[j] = rand() % N;
}
// Driver: allocates N ints on host and device, times H2D copies + kernel
// + D2H copy with CUDA events, and prints the elapsed milliseconds.
int main(void)
{
    int *a, *b, *c;       // host buffers
    int *d_a, *d_b, *d_c; // device buffers
    int size = N * sizeof(int);
    // Events bracketing the transfers and the kernel launch.
    cudaEvent_t start, stop;
    float time = 0.0f;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Allocate device memory
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    // Allocate and initialise host memory
    a = (int *)malloc(size); random_ints(a, N);
    b = (int *)malloc(size); random_ints(b, N);
    c = (int *)malloc(size);
    cudaEventRecord(start, 0);
    // Copy host memory to device memory
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // Launch the add kernel: ceil(N/M) blocks of M threads each
    add<<<(N + M - 1) / M, M>>>(d_a, d_b, d_c, N);
    // Copy result back to host (blocks until the kernel finishes)
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // BUG FIX: the original printed 'time' before cudaEventElapsedTime had
    // filled it in, so an uninitialized value was printed. Compute the
    // elapsed time first, then print.
    cudaEventElapsedTime(&time, start, stop);
    printf("GPU Execution Time = %f\n", time);
    printf("Execution Time = %f\n", time);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Cleanup
    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
|
#include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define N 1000000 //Job size = 1M
#define M 128 // Varying Thread block size- 128, 256, 512, 1024
//add kernel
// Element-wise vector add: c[i] = a[i] + b[i] for the first k elements.
// Launched 1-D; the guard handles the partial last block.
__global__ void add(int *a, int *b, int *c, int k)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < k) {
        c[i] = a[i] + b[i];
    }
}
//Random number generator function
// Fill x[0..size-1] with pseudo-random values in [0, N).
void random_ints(int* x, int size)
{
    for (int j = 0; j < size; j++)
        x[j] = rand() % N;
}
// Driver: allocates N ints on host and device, times H2D copies + kernel
// + D2H copy with HIP events, and prints the elapsed milliseconds.
int main(void)
{
    int *a, *b, *c;       // host buffers
    int *d_a, *d_b, *d_c; // device buffers
    int size = N * sizeof(int);
    // Events bracketing the transfers and the kernel launch.
    hipEvent_t start, stop;
    float time = 0.0f;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    // Allocate device memory
    hipMalloc((void **)&d_a, size);
    hipMalloc((void **)&d_b, size);
    hipMalloc((void **)&d_c, size);
    // Allocate and initialise host memory
    a = (int *)malloc(size); random_ints(a, N);
    b = (int *)malloc(size); random_ints(b, N);
    c = (int *)malloc(size);
    hipEventRecord(start, 0);
    // Copy host memory to device memory
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
    // Launch the add kernel: ceil(N/M) blocks of M threads each
    add<<<(N + M - 1) / M, M>>>(d_a, d_b, d_c, N);
    // Copy result back to host (blocks until the kernel finishes)
    hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    // BUG FIX: the original printed 'time' before hipEventElapsedTime had
    // filled it in, so an uninitialized value was printed. Compute the
    // elapsed time first, then print.
    hipEventElapsedTime(&time, start, stop);
    printf("GPU Execution Time = %f\n", time);
    printf("Execution Time = %f\n", time);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    // Cleanup
    free(a); free(b); free(c);
    hipFree(d_a); hipFree(d_b); hipFree(d_c);
    return 0;
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define N 1000000 //Job size = 1M
#define M 128 // Varying Thread block size- 128, 256, 512, 1024
//add kernel
// Element-wise vector add: c[i] = a[i] + b[i] for the first k elements.
// Launched 1-D; the guard handles the partial last block.
__global__ void add(int *a, int *b, int *c, int k)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < k) {
        c[i] = a[i] + b[i];
    }
}
//Random number generator function
// Fill x[0..size-1] with pseudo-random values in [0, N).
void random_ints(int* x, int size)
{
    for (int j = 0; j < size; j++)
        x[j] = rand() % N;
}
// Driver: allocates N ints on host and device, times H2D copies + kernel
// + D2H copy with HIP events, and prints the elapsed milliseconds.
int main(void)
{
    int *a, *b, *c;       // host buffers
    int *d_a, *d_b, *d_c; // device buffers
    int size = N * sizeof(int);
    // Events bracketing the transfers and the kernel launch.
    hipEvent_t start, stop;
    float time = 0.0f;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    // Allocate device memory
    hipMalloc((void **)&d_a, size);
    hipMalloc((void **)&d_b, size);
    hipMalloc((void **)&d_c, size);
    // Allocate and initialise host memory
    a = (int *)malloc(size); random_ints(a, N);
    b = (int *)malloc(size); random_ints(b, N);
    c = (int *)malloc(size);
    hipEventRecord(start, 0);
    // Copy host memory to device memory
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
    // Launch the add kernel: ceil(N/M) blocks of M threads each
    add<<<(N + M - 1) / M, M>>>(d_a, d_b, d_c, N);
    // Copy result back to host (blocks until the kernel finishes)
    hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    // BUG FIX: the original printed 'time' before hipEventElapsedTime had
    // filled it in, so an uninitialized value was printed. Compute the
    // elapsed time first, then print.
    hipEventElapsedTime(&time, start, stop);
    printf("GPU Execution Time = %f\n", time);
    printf("Execution Time = %f\n", time);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    // Cleanup
    free(a); free(b); free(c);
    hipFree(d_a); hipFree(d_b); hipFree(d_c);
    return 0;
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPiS_S_i
.globl _Z3addPiS_S_i
.p2align 8
.type _Z3addPiS_S_i,@function
_Z3addPiS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPiS_S_i, .Lfunc_end0-_Z3addPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3addPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define N 1000000 //Job size = 1M
#define M 128 // Varying Thread block size- 128, 256, 512, 1024
//add kernel
// Element-wise vector add: c[i] = a[i] + b[i] for the first k elements.
// Launched 1-D; the guard handles the partial last block.
__global__ void add(int *a, int *b, int *c, int k)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < k) {
        c[i] = a[i] + b[i];
    }
}
//Random number generator function
// Fill x[0..size-1] with pseudo-random values in [0, N).
void random_ints(int* x, int size)
{
    for (int j = 0; j < size; j++)
        x[j] = rand() % N;
}
// Driver: allocates N ints on host and device, times H2D copies + kernel
// + D2H copy with HIP events, and prints the elapsed milliseconds.
int main(void)
{
    int *a, *b, *c;       // host buffers
    int *d_a, *d_b, *d_c; // device buffers
    int size = N * sizeof(int);
    // Events bracketing the transfers and the kernel launch.
    hipEvent_t start, stop;
    float time = 0.0f;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    // Allocate device memory
    hipMalloc((void **)&d_a, size);
    hipMalloc((void **)&d_b, size);
    hipMalloc((void **)&d_c, size);
    // Allocate and initialise host memory
    a = (int *)malloc(size); random_ints(a, N);
    b = (int *)malloc(size); random_ints(b, N);
    c = (int *)malloc(size);
    hipEventRecord(start, 0);
    // Copy host memory to device memory
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
    // Launch the add kernel: ceil(N/M) blocks of M threads each
    add<<<(N + M - 1) / M, M>>>(d_a, d_b, d_c, N);
    // Copy result back to host (blocks until the kernel finishes)
    hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    // BUG FIX: the original printed 'time' before hipEventElapsedTime had
    // filled it in, so an uninitialized value was printed. Compute the
    // elapsed time first, then print.
    hipEventElapsedTime(&time, start, stop);
    printf("GPU Execution Time = %f\n", time);
    printf("Execution Time = %f\n", time);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    // Cleanup
    free(a); free(b); free(c);
    hipFree(d_a); hipFree(d_b); hipFree(d_c);
    return 0;
}
|
.text
.file "add_GPU.hip"
.globl _Z18__device_stub__addPiS_S_i # -- Begin function _Z18__device_stub__addPiS_S_i
.p2align 4, 0x90
.type _Z18__device_stub__addPiS_S_i,@function
_Z18__device_stub__addPiS_S_i: # @_Z18__device_stub__addPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z18__device_stub__addPiS_S_i, .Lfunc_end0-_Z18__device_stub__addPiS_S_i
.cfi_endproc
# -- End function
.globl _Z11random_intsPii # -- Begin function _Z11random_intsPii
.p2align 4, 0x90
.type _Z11random_intsPii,@function
_Z11random_intsPii: # @_Z11random_intsPii
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1125899907, %rax, %rcx # imm = 0x431BDE83
movq %rcx, %rdx
shrq $63, %rdx
sarq $50, %rcx
addl %edx, %ecx
imull $1000000, %ecx, %ecx # imm = 0xF4240
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB1_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB1_4: # %._crit_edge
retq
.Lfunc_end1:
.size _Z11random_intsPii, .Lfunc_end1-_Z11random_intsPii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $160, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 8(%rsp), %rdi
callq hipEventCreate
movq %rsp, %rdi
callq hipEventCreate
leaq 32(%rsp), %rdi
movl $4000000, %esi # imm = 0x3D0900
callq hipMalloc
leaq 24(%rsp), %rdi
movl $4000000, %esi # imm = 0x3D0900
callq hipMalloc
leaq 16(%rsp), %rdi
movl $4000000, %esi # imm = 0x3D0900
callq hipMalloc
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1125899907, %rax, %rcx # imm = 0x431BDE83
movq %rcx, %rdx
shrq $63, %rdx
sarq $50, %rcx
addl %edx, %ecx
imull $1000000, %ecx, %ecx # imm = 0xF4240
subl %ecx, %eax
movl %eax, (%rbx,%r14,4)
incq %r14
cmpq $1000000, %r14 # imm = 0xF4240
jne .LBB2_1
# %bb.2: # %_Z11random_intsPii.exit
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_3: # %.lr.ph.i18
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1125899907, %rax, %rcx # imm = 0x431BDE83
movq %rcx, %rdx
shrq $63, %rdx
sarq $50, %rcx
addl %edx, %ecx
imull $1000000, %ecx, %ecx # imm = 0xF4240
subl %ecx, %eax
movl %eax, (%r14,%r15,4)
incq %r15
cmpq $1000000, %r15 # imm = 0xF4240
jne .LBB2_3
# %bb.4: # %_Z11random_intsPii.exit22
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %r15
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 32(%rsp), %rdi
movl $4000000, %edx # imm = 0x3D0900
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $4000000, %edx # imm = 0x3D0900
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967424, %rdx # imm = 0x100000080
leaq 7685(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_6
# %bb.5:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 152(%rsp)
movq %rcx, 144(%rsp)
movq %rdx, 136(%rsp)
movl $1000000, 44(%rsp) # imm = 0xF4240
leaq 152(%rsp), %rax
movq %rax, 48(%rsp)
leaq 144(%rsp), %rax
movq %rax, 56(%rsp)
leaq 136(%rsp), %rax
movq %rax, 64(%rsp)
leaq 44(%rsp), %rax
movq %rax, 72(%rsp)
leaq 120(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
movq 120(%rsp), %rsi
movl 128(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z3addPiS_S_i, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_6:
movss 48(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
movq 16(%rsp), %rsi
movl $4000000, %edx # imm = 0x3D0900
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movq (%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq (%rsp), %rdi
callq hipEventSynchronize
movq 8(%rsp), %rsi
movq (%rsp), %rdx
leaq 48(%rsp), %rdi
callq hipEventElapsedTime
movq 8(%rsp), %rdi
callq hipEventDestroy
movq (%rsp), %rdi
callq hipEventDestroy
movss 48(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $160, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPiS_S_i,@object # @_Z3addPiS_S_i
.section .rodata,"a",@progbits
.globl _Z3addPiS_S_i
.p2align 3, 0x0
_Z3addPiS_S_i:
.quad _Z18__device_stub__addPiS_S_i
.size _Z3addPiS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "GPU Execution Time = %f\n"
.size .L.str, 25
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Execution Time = %f\n"
.size .L.str.1, 21
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPiS_S_i"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : _Z3addPiS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e280000002100 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0030*/ IMAD R6, R3, c[0x0][0x0], R6 ; /* 0x0000000003067a24 */
/* 0x001fca00078e0206 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPiS_S_i
.globl _Z3addPiS_S_i
.p2align 8
.type _Z3addPiS_S_i,@function
_Z3addPiS_S_i:
; gfx1100 kernel: c[i] = a[i] + b[i] for global thread id i < k.
; Kernarg layout (see amdhsa metadata below): a/b/c pointers at 0x0/0x8/0x10,
; k at 0x18, hidden group-size-x packed as u16 at 0x2c; s15 = workgroup id x.
	s_clause 0x1
	s_load_b32 s2, s[0:1], 0x2c        ; hidden group size (blockDim.x in low 16 bits)
	s_load_b32 s3, s[0:1], 0x18        ; k (element count)
	s_waitcnt lgkmcnt(0)
	s_and_b32 s2, s2, 0xffff           ; extract 16-bit blockDim.x
	s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]   ; i = blockIdx.x*blockDim.x + tid
	s_mov_b32 s2, exec_lo
	v_cmpx_gt_i32_e64 s3, v1           ; disable lanes with i >= k
	s_cbranch_execz .LBB0_2
	s_load_b128 s[4:7], s[0:1], 0x0    ; a -> s[4:5], b -> s[6:7]
	v_ashrrev_i32_e32 v2, 31, v1       ; sign-extend i to 64 bits
	s_load_b64 s[0:1], s[0:1], 0x10    ; c pointer
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_lshlrev_b64 v[0:1], 2, v[1:2]    ; byte offset = i * sizeof(int)
	s_waitcnt lgkmcnt(0)
	v_add_co_u32 v2, vcc_lo, s4, v0
	s_delay_alu instid0(VALU_DEP_2)
	v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo   ; v[2:3] = &a[i]
	v_add_co_u32 v4, vcc_lo, s6, v0
	v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo   ; v[4:5] = &b[i]
	v_add_co_u32 v0, vcc_lo, s0, v0
	global_load_b32 v2, v[2:3], off    ; a[i]
	global_load_b32 v3, v[4:5], off    ; b[i]
	v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo   ; v[0:1] = &c[i]
	s_waitcnt vmcnt(0)
	v_add_nc_u32_e32 v2, v3, v2        ; a[i] + b[i]
	global_store_b32 v[0:1], v2, off   ; c[i] = sum
.LBB0_2:
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPiS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPiS_S_i, .Lfunc_end0-_Z3addPiS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPiS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3addPiS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_00055e3e_00000000-6_add_GPU.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
	.globl _Z11random_intsPii
	.type _Z11random_intsPii, @function
_Z11random_intsPii:
# random_ints(int *x, int size): fill x[0..size-1] with rand() % 1000000.
# The modulo is strength-reduced by GCC into a multiply/shift reciprocal
# (1125899907 ~= 2^50 / 1e6), so no div instruction is needed.
.LFB2057:
	.cfi_startproc
	endbr64
	testl	%esi, %esi                # size <= 0: nothing to fill
	jle	.L8
	pushq	%rbp
	.cfi_def_cfa_offset 16
	.cfi_offset 6, -16
	pushq	%rbx
	.cfi_def_cfa_offset 24
	.cfi_offset 3, -24
	subq	$8, %rsp
	.cfi_def_cfa_offset 32
	movq	%rdi, %rbx                # rbx = current element pointer
	movslq	%esi, %rsi
	leaq	(%rdi,%rsi,4), %rbp       # rbp = one-past-the-end pointer
.L5:
	call	rand@PLT
	movslq	%eax, %rdx
	imulq	$1125899907, %rdx, %rdx   # high bits of rand() * (2^50/1e6)
	sarq	$50, %rdx                 # -> quotient rand()/1000000
	movl	%eax, %ecx
	sarl	$31, %ecx                 # sign-correction term for negative input
	subl	%ecx, %edx
	imull	$1000000, %edx, %edx      # quotient * 1000000
	subl	%edx, %eax                # remainder = rand() % 1000000
	movl	%eax, (%rbx)
	addq	$4, %rbx
	cmpq	%rbp, %rbx
	jne	.L5
	addq	$8, %rsp
	.cfi_def_cfa_offset 24
	popq	%rbx
	.cfi_def_cfa_offset 16
	popq	%rbp
	.cfi_def_cfa_offset 8
	ret
.L8:
	.cfi_restore 3
	.cfi_restore 6
	ret
	.cfi_endproc
.LFE2057:
	.size	_Z11random_intsPii, .-_Z11random_intsPii
.globl _Z27__device_stub__Z3addPiS_S_iPiS_S_i
.type _Z27__device_stub__Z3addPiS_S_iPiS_S_i, @function
_Z27__device_stub__Z3addPiS_S_iPiS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPiS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z27__device_stub__Z3addPiS_S_iPiS_S_i, .-_Z27__device_stub__Z3addPiS_S_iPiS_S_i
.globl _Z3addPiS_S_i
.type _Z3addPiS_S_i, @function
_Z3addPiS_S_i:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z3addPiS_S_iPiS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z3addPiS_S_i, .-_Z3addPiS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "GPU Execution Time = %f\n"
.LC1:
.string "Execution Time = %f\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $80, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
leaq 8(%rsp), %rdi
movl $4000000, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $4000000, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4000000, %esi
call cudaMalloc@PLT
movl $4000000, %edi
call malloc@PLT
movq %rax, %rbp
movl $1000000, %esi
movq %rax, %rdi
call _Z11random_intsPii
movl $4000000, %edi
call malloc@PLT
movq %rax, %rbx
movl $1000000, %esi
movq %rax, %rdi
call _Z11random_intsPii
movl $4000000, %edi
call malloc@PLT
movq %rax, %r12
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $1, %ecx
movl $4000000, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4000000, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $128, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $7813, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L23
.L20:
pxor %xmm0, %xmm0
cvtss2sd 60(%rsp), %xmm0
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $2, %ecx
movl $4000000, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 60(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
movq 32(%rsp), %rdi
call cudaEventDestroy@PLT
movq 40(%rsp), %rdi
call cudaEventDestroy@PLT
pxor %xmm0, %xmm0
cvtss2sd 60(%rsp), %xmm0
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L24
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
movl $1000000, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z27__device_stub__Z3addPiS_S_iPiS_S_i
jmp .L20
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z3addPiS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPiS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "add_GPU.hip"
.globl _Z18__device_stub__addPiS_S_i # -- Begin function _Z18__device_stub__addPiS_S_i
.p2align 4, 0x90
.type _Z18__device_stub__addPiS_S_i,@function
_Z18__device_stub__addPiS_S_i: # @_Z18__device_stub__addPiS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPiS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z18__device_stub__addPiS_S_i, .Lfunc_end0-_Z18__device_stub__addPiS_S_i
.cfi_endproc
# -- End function
.globl _Z11random_intsPii # -- Begin function _Z11random_intsPii
.p2align 4, 0x90
.type _Z11random_intsPii,@function
_Z11random_intsPii: # @_Z11random_intsPii
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1125899907, %rax, %rcx # imm = 0x431BDE83
movq %rcx, %rdx
shrq $63, %rdx
sarq $50, %rcx
addl %edx, %ecx
imull $1000000, %ecx, %ecx # imm = 0xF4240
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB1_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB1_4: # %._crit_edge
retq
.Lfunc_end1:
.size _Z11random_intsPii, .Lfunc_end1-_Z11random_intsPii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $160, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 8(%rsp), %rdi
callq hipEventCreate
movq %rsp, %rdi
callq hipEventCreate
leaq 32(%rsp), %rdi
movl $4000000, %esi # imm = 0x3D0900
callq hipMalloc
leaq 24(%rsp), %rdi
movl $4000000, %esi # imm = 0x3D0900
callq hipMalloc
leaq 16(%rsp), %rdi
movl $4000000, %esi # imm = 0x3D0900
callq hipMalloc
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1125899907, %rax, %rcx # imm = 0x431BDE83
movq %rcx, %rdx
shrq $63, %rdx
sarq $50, %rcx
addl %edx, %ecx
imull $1000000, %ecx, %ecx # imm = 0xF4240
subl %ecx, %eax
movl %eax, (%rbx,%r14,4)
incq %r14
cmpq $1000000, %r14 # imm = 0xF4240
jne .LBB2_1
# %bb.2: # %_Z11random_intsPii.exit
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_3: # %.lr.ph.i18
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1125899907, %rax, %rcx # imm = 0x431BDE83
movq %rcx, %rdx
shrq $63, %rdx
sarq $50, %rcx
addl %edx, %ecx
imull $1000000, %ecx, %ecx # imm = 0xF4240
subl %ecx, %eax
movl %eax, (%r14,%r15,4)
incq %r15
cmpq $1000000, %r15 # imm = 0xF4240
jne .LBB2_3
# %bb.4: # %_Z11random_intsPii.exit22
movl $4000000, %edi # imm = 0x3D0900
callq malloc
movq %rax, %r15
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 32(%rsp), %rdi
movl $4000000, %edx # imm = 0x3D0900
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $4000000, %edx # imm = 0x3D0900
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967424, %rdx # imm = 0x100000080
leaq 7685(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_6
# %bb.5:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 152(%rsp)
movq %rcx, 144(%rsp)
movq %rdx, 136(%rsp)
movl $1000000, 44(%rsp) # imm = 0xF4240
leaq 152(%rsp), %rax
movq %rax, 48(%rsp)
leaq 144(%rsp), %rax
movq %rax, 56(%rsp)
leaq 136(%rsp), %rax
movq %rax, 64(%rsp)
leaq 44(%rsp), %rax
movq %rax, 72(%rsp)
leaq 120(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
movq 120(%rsp), %rsi
movl 128(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z3addPiS_S_i, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_6:
movss 48(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
movq 16(%rsp), %rsi
movl $4000000, %edx # imm = 0x3D0900
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movq (%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq (%rsp), %rdi
callq hipEventSynchronize
movq 8(%rsp), %rsi
movq (%rsp), %rdx
leaq 48(%rsp), %rdi
callq hipEventElapsedTime
movq 8(%rsp), %rdi
callq hipEventDestroy
movq (%rsp), %rdi
callq hipEventDestroy
movss 48(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $160, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPiS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPiS_S_i,@object # @_Z3addPiS_S_i
.section .rodata,"a",@progbits
.globl _Z3addPiS_S_i
.p2align 3, 0x0
_Z3addPiS_S_i:
.quad _Z18__device_stub__addPiS_S_i
.size _Z3addPiS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "GPU Execution Time = %f\n"
.size .L.str, 25
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Execution Time = %f\n"
.size .L.str.1, 21
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPiS_S_i"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPiS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPiS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// In-place element-wise add: val1[i] += val2[i], one thread per element.
// Expected launch: a single 1-D block with blockDim.x >= *num_elem.
// NOTE(review): num_elem was previously ignored; the guard below assumes it
// points to a valid device int holding the element count — confirm callers.
__global__ void add3(float *val1, float *val2, int *num_elem)
{
    int i = threadIdx.x;
    if (i < *num_elem)              // mask off tail threads past the data
        val1[i] += val2[i];
}
// NOTE: despite its name, this kernel adds (val2[i] + 1) to val1[i];
// it performs no subtraction. Name preserved for interface compatibility.
__global__ void sub3(float *val1, float *val2, int *num_elem)
{
    int i = threadIdx.x;
    if (i < *num_elem)              // same bounds guard as add3
        val1[i] += val2[i] + 1.0f;  // explicit float literal (was int 1, promoted)
}
// Host entry point: intentionally empty — this TU exists only to compile the
// kernels above; nothing is launched.
int main()
{
    return 0;
}
|
code for sm_80
Function : _Z4sub3PfS_Pi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x001fc800078e0205 */
/*0050*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe400078e0205 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ee2000c1e1900 */
/*0080*/ FADD R0, R2, 1 ; /* 0x3f80000002007421 */
/* 0x004fc80000000000 */
/*0090*/ FADD R7, R0, R7 ; /* 0x0000000700077221 */
/* 0x008fca0000000000 */
/*00a0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z4add3PfS_Pi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x001fc800078e0205 */
/*0050*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe400078e0205 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea4000c1e1900 */
/*0080*/ FADD R7, R2, R7 ; /* 0x0000000702077221 */
/* 0x004fca0000000000 */
/*0090*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// In-place element-wise add: val1[i] += val2[i], one thread per element.
// Expected launch: a single 1-D block with blockDim.x >= *num_elem.
// NOTE(review): num_elem was previously ignored; the guard below assumes it
// points to a valid device int holding the element count — confirm callers.
__global__ void add3(float *val1, float *val2, int *num_elem)
{
    int i = threadIdx.x;
    if (i < *num_elem)              // mask off tail threads past the data
        val1[i] += val2[i];
}
// NOTE: despite its name, this kernel adds (val2[i] + 1) to val1[i];
// it performs no subtraction. Name preserved for interface compatibility.
__global__ void sub3(float *val1, float *val2, int *num_elem)
{
    int i = threadIdx.x;
    if (i < *num_elem)              // same bounds guard as add3
        val1[i] += val2[i] + 1.0f;  // explicit float literal (was int 1, promoted)
}
// Host entry point: intentionally empty — this TU exists only to compile the
// kernels above; nothing is launched.
int main()
{
    return 0;
}
|
.file "tmpxft_0017f61f_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
movl $0, %eax
ret
.cfi_endproc
.LFE2057:
.size main, .-main
.globl _Z27__device_stub__Z4add3PfS_PiPfS_Pi
.type _Z27__device_stub__Z4add3PfS_PiPfS_Pi, @function
_Z27__device_stub__Z4add3PfS_PiPfS_Pi:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L8
.L4:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L9
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4add3PfS_Pi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L4
.L9:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z4add3PfS_PiPfS_Pi, .-_Z27__device_stub__Z4add3PfS_PiPfS_Pi
.globl _Z4add3PfS_Pi
.type _Z4add3PfS_Pi, @function
_Z4add3PfS_Pi:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z4add3PfS_PiPfS_Pi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z4add3PfS_Pi, .-_Z4add3PfS_Pi
.globl _Z27__device_stub__Z4sub3PfS_PiPfS_Pi
.type _Z27__device_stub__Z4sub3PfS_PiPfS_Pi, @function
_Z27__device_stub__Z4sub3PfS_PiPfS_Pi:
.LFB2084:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L16
.L12:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L17
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4sub3PfS_Pi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L12
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z27__device_stub__Z4sub3PfS_PiPfS_Pi, .-_Z27__device_stub__Z4sub3PfS_PiPfS_Pi
.globl _Z4sub3PfS_Pi
.type _Z4sub3PfS_Pi, @function
_Z4sub3PfS_Pi:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z4sub3PfS_PiPfS_Pi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z4sub3PfS_Pi, .-_Z4sub3PfS_Pi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z4sub3PfS_Pi"
.LC1:
.string "_Z4add3PfS_Pi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2087:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z4sub3PfS_Pi(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z4add3PfS_Pi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Kernel: element-wise in-place add, val1[i] += val2[i].
// One element per thread, indexed by threadIdx.x only, so it is meant for a
// single-block launch with blockDim.x equal to the array length.
// num_elem is accepted but unused; there is no bounds guard.
// NOTE(review): assumes the launch config matches the array size -- confirm.
__global__ void add3(float *val1, float *val2, int *num_elem)
{
int i = threadIdx.x;
val1[i] += val2[i];
}
// Kernel: despite the name, this computes val1[i] += val2[i] + 1 (an add,
// not a subtract). Kept as-is: the paired device assembly in this file
// (FADD with 1.0) matches this exact expression.
// One element per thread via threadIdx.x; num_elem unused; no bounds guard.
__global__ void sub3(float *val1, float *val2, int *num_elem)
{
int i = threadIdx.x;
val1[i] += val2[i]+1;
}
// Host entry point: intentionally empty. This translation unit exists only
// to exercise kernel code generation; nothing is launched.
int main()
{
return 0;
}
|
#include "hip/hip_runtime.h"
#include <stdio.h>
// HIP kernel: in-place element-wise add, val1[i] += val2[i].
// Indexes with threadIdx.x alone (single-block launch assumed); num_elem is
// unused and there is no i < n guard -- caller must size the launch to the
// arrays (NOTE(review): confirm).
__global__ void add3(float *val1, float *val2, int *num_elem)
{
int i = threadIdx.x;
val1[i] += val2[i];
}
// HIP kernel: misleadingly named -- it performs val1[i] += val2[i] + 1,
// an addition. Left unchanged so it stays consistent with the compiled
// outputs paired with this source elsewhere in the file.
__global__ void sub3(float *val1, float *val2, int *num_elem)
{
int i = threadIdx.x;
val1[i] += val2[i]+1;
}
// Empty host entry point; this file only exists to generate device code.
int main()
{
return 0;
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include "hip/hip_runtime.h"
#include <stdio.h>
// Kernel: val1[i] += val2[i], one element per thread (threadIdx.x index,
// single block assumed). num_elem is unused; no bounds check.
__global__ void add3(float *val1, float *val2, int *num_elem)
{
int i = threadIdx.x;
val1[i] += val2[i];
}
// Kernel: computes val1[i] += val2[i] + 1 (an add despite the "sub" name).
// Matches the v_add_f32 with the 1.0 immediate in the paired gfx1100 output.
__global__ void sub3(float *val1, float *val2, int *num_elem)
{
int i = threadIdx.x;
val1[i] += val2[i]+1;
}
// Empty host main; kernels above are never launched from here.
int main()
{
return 0;
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4add3PfS_Pi
.globl _Z4add3PfS_Pi
.p2align 8
.type _Z4add3PfS_Pi,@function
; gfx1100 kernel body for add3: val1[tid] += val2[tid].
; s[0:1]=val1, s[2:3]=val2 loaded from the kernarg segment; v0 = tid*4.
_Z4add3PfS_Pi:
	s_load_b128 s[0:3], s[0:1], 0x0
	v_lshlrev_b32_e32 v0, 2, v0
	s_waitcnt lgkmcnt(0)
; Issue both 32-bit loads back-to-back (clause), wait, add, store in place.
	s_clause 0x1
	global_load_b32 v1, v0, s[2:3]
	global_load_b32 v2, v0, s[0:1]
	s_waitcnt vmcnt(0)
	v_add_f32_e32 v1, v1, v2
	global_store_b32 v0, v1, s[0:1]
	s_nop 0
; Release VGPRs before endpgm (gfx11 deallocation message).
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4add3PfS_Pi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 4
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4add3PfS_Pi, .Lfunc_end0-_Z4add3PfS_Pi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z4sub3PfS_Pi
.globl _Z4sub3PfS_Pi
.p2align 8
.type _Z4sub3PfS_Pi,@function
; gfx1100 kernel body for sub3 (actually an add): val1[tid] += val2[tid] + 1.
; s[0:1]=val1, s[2:3]=val2; v0 = tid*4 byte offset.
_Z4sub3PfS_Pi:
	s_load_b128 s[0:3], s[0:1], 0x0
	v_lshlrev_b32_e32 v0, 2, v0
	s_waitcnt lgkmcnt(0)
	s_clause 0x1
	global_load_b32 v1, v0, s[2:3]
	global_load_b32 v2, v0, s[0:1]
; First load done (vmcnt(1)): fold in the +1.0 while the second is in flight.
	s_waitcnt vmcnt(1)
	v_add_f32_e32 v1, 1.0, v1
	s_waitcnt vmcnt(0)
	s_delay_alu instid0(VALU_DEP_1)
	v_add_f32_e32 v1, v2, v1
	global_store_b32 v0, v1, s[0:1]
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4sub3PfS_Pi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 4
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z4sub3PfS_Pi, .Lfunc_end1-_Z4sub3PfS_Pi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4add3PfS_Pi
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z4add3PfS_Pi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4sub3PfS_Pi
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z4sub3PfS_Pi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include "hip/hip_runtime.h"
#include <stdio.h>
// HIP kernel: val1[i] += val2[i] with i = threadIdx.x (one block assumed).
// num_elem unused; no bounds guard -- launch must match array length.
__global__ void add3(float *val1, float *val2, int *num_elem)
{
int i = threadIdx.x;
val1[i] += val2[i];
}
// HIP kernel: name says "sub" but the operation is val1[i] += val2[i] + 1.
// Preserved verbatim to stay consistent with the compiled host/device
// outputs paired with this snippet.
__global__ void sub3(float *val1, float *val2, int *num_elem)
{
int i = threadIdx.x;
val1[i] += val2[i]+1;
}
// Empty host entry point; the kernels above are never launched.
int main()
{
return 0;
}
|
.text
.file "kernel.hip"
.globl _Z19__device_stub__add3PfS_Pi # -- Begin function _Z19__device_stub__add3PfS_Pi
.p2align 4, 0x90
.type _Z19__device_stub__add3PfS_Pi,@function
_Z19__device_stub__add3PfS_Pi: # @_Z19__device_stub__add3PfS_Pi
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4add3PfS_Pi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z19__device_stub__add3PfS_Pi, .Lfunc_end0-_Z19__device_stub__add3PfS_Pi
.cfi_endproc
# -- End function
.globl _Z19__device_stub__sub3PfS_Pi # -- Begin function _Z19__device_stub__sub3PfS_Pi
.p2align 4, 0x90
.type _Z19__device_stub__sub3PfS_Pi,@function
_Z19__device_stub__sub3PfS_Pi: # @_Z19__device_stub__sub3PfS_Pi
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4sub3PfS_Pi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z19__device_stub__sub3PfS_Pi, .Lfunc_end1-_Z19__device_stub__sub3PfS_Pi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
xorl %eax, %eax
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4add3PfS_Pi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4sub3PfS_Pi, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4add3PfS_Pi,@object # @_Z4add3PfS_Pi
.section .rodata,"a",@progbits
.globl _Z4add3PfS_Pi
.p2align 3, 0x0
_Z4add3PfS_Pi:
.quad _Z19__device_stub__add3PfS_Pi
.size _Z4add3PfS_Pi, 8
.type _Z4sub3PfS_Pi,@object # @_Z4sub3PfS_Pi
.globl _Z4sub3PfS_Pi
.p2align 3, 0x0
_Z4sub3PfS_Pi:
.quad _Z19__device_stub__sub3PfS_Pi
.size _Z4sub3PfS_Pi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z4add3PfS_Pi"
.size .L__unnamed_1, 14
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z4sub3PfS_Pi"
.size .L__unnamed_2, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__add3PfS_Pi
.addrsig_sym _Z19__device_stub__sub3PfS_Pi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4add3PfS_Pi
.addrsig_sym _Z4sub3PfS_Pi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : _Z4sub3PfS_Pi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x001fc800078e0205 */
/*0050*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe400078e0205 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ee2000c1e1900 */
/*0080*/ FADD R0, R2, 1 ; /* 0x3f80000002007421 */
/* 0x004fc80000000000 */
/*0090*/ FADD R7, R0, R7 ; /* 0x0000000700077221 */
/* 0x008fca0000000000 */
/*00a0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z4add3PfS_Pi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x001fc800078e0205 */
/*0050*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe400078e0205 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R7, [R4.64] ; /* 0x0000000404077981 */
/* 0x000ea4000c1e1900 */
/*0080*/ FADD R7, R2, R7 ; /* 0x0000000702077221 */
/* 0x004fca0000000000 */
/*0090*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00b0*/ BRA 0xb0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4add3PfS_Pi
.globl _Z4add3PfS_Pi
.p2align 8
.type _Z4add3PfS_Pi,@function
_Z4add3PfS_Pi:
s_load_b128 s[0:3], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[2:3]
global_load_b32 v2, v0, s[0:1]
s_waitcnt vmcnt(0)
v_add_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4add3PfS_Pi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 4
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4add3PfS_Pi, .Lfunc_end0-_Z4add3PfS_Pi
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z4sub3PfS_Pi
.globl _Z4sub3PfS_Pi
.p2align 8
.type _Z4sub3PfS_Pi,@function
_Z4sub3PfS_Pi:
s_load_b128 s[0:3], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[2:3]
global_load_b32 v2, v0, s[0:1]
s_waitcnt vmcnt(1)
v_add_f32_e32 v1, 1.0, v1
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v1, v2, v1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4sub3PfS_Pi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 4
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z4sub3PfS_Pi, .Lfunc_end1-_Z4sub3PfS_Pi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4add3PfS_Pi
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z4add3PfS_Pi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4sub3PfS_Pi
.private_segment_fixed_size: 0
.sgpr_count: 4
.sgpr_spill_count: 0
.symbol: _Z4sub3PfS_Pi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_0017f61f_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
# atexit() hook: unregisters the fat binary recorded in
# __cudaFatCubinHandle by the module's static initializer.
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2060:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl main
.type main, @function
# Trivial host main: returns 0 and does nothing else.
main:
.LFB2057:
	.cfi_startproc
	endbr64
	movl	$0, %eax
	ret
	.cfi_endproc
.LFE2057:
	.size	main, .-main
.globl _Z27__device_stub__Z4add3PfS_PiPfS_Pi
.type _Z27__device_stub__Z4add3PfS_PiPfS_Pi, @function
_Z27__device_stub__Z4add3PfS_PiPfS_Pi:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L8
.L4:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L9
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4add3PfS_Pi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L4
.L9:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z4add3PfS_PiPfS_Pi, .-_Z27__device_stub__Z4add3PfS_PiPfS_Pi
.globl _Z4add3PfS_Pi
.type _Z4add3PfS_Pi, @function
_Z4add3PfS_Pi:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z4add3PfS_PiPfS_Pi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z4add3PfS_Pi, .-_Z4add3PfS_Pi
.globl _Z27__device_stub__Z4sub3PfS_PiPfS_Pi
.type _Z27__device_stub__Z4sub3PfS_PiPfS_Pi, @function
_Z27__device_stub__Z4sub3PfS_PiPfS_Pi:
.LFB2084:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L16
.L12:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L17
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z4sub3PfS_Pi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L12
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z27__device_stub__Z4sub3PfS_PiPfS_Pi, .-_Z27__device_stub__Z4sub3PfS_PiPfS_Pi
.globl _Z4sub3PfS_Pi
.type _Z4sub3PfS_Pi, @function
_Z4sub3PfS_Pi:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z4sub3PfS_PiPfS_Pi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z4sub3PfS_Pi, .-_Z4sub3PfS_Pi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z4sub3PfS_Pi"
.LC1:
.string "_Z4add3PfS_Pi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2087:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z4sub3PfS_Pi(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z4add3PfS_Pi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "kernel.hip"
.globl _Z19__device_stub__add3PfS_Pi # -- Begin function _Z19__device_stub__add3PfS_Pi
.p2align 4, 0x90
.type _Z19__device_stub__add3PfS_Pi,@function
_Z19__device_stub__add3PfS_Pi: # @_Z19__device_stub__add3PfS_Pi
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4add3PfS_Pi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z19__device_stub__add3PfS_Pi, .Lfunc_end0-_Z19__device_stub__add3PfS_Pi
.cfi_endproc
# -- End function
.globl _Z19__device_stub__sub3PfS_Pi # -- Begin function _Z19__device_stub__sub3PfS_Pi
.p2align 4, 0x90
.type _Z19__device_stub__sub3PfS_Pi,@function
_Z19__device_stub__sub3PfS_Pi: # @_Z19__device_stub__sub3PfS_Pi
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z4sub3PfS_Pi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z19__device_stub__sub3PfS_Pi, .Lfunc_end1-_Z19__device_stub__sub3PfS_Pi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
xorl %eax, %eax
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4add3PfS_Pi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4sub3PfS_Pi, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4add3PfS_Pi,@object # @_Z4add3PfS_Pi
.section .rodata,"a",@progbits
.globl _Z4add3PfS_Pi
.p2align 3, 0x0
_Z4add3PfS_Pi:
.quad _Z19__device_stub__add3PfS_Pi
.size _Z4add3PfS_Pi, 8
.type _Z4sub3PfS_Pi,@object # @_Z4sub3PfS_Pi
.globl _Z4sub3PfS_Pi
.p2align 3, 0x0
_Z4sub3PfS_Pi:
.quad _Z19__device_stub__sub3PfS_Pi
.size _Z4sub3PfS_Pi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z4add3PfS_Pi"
.size .L__unnamed_1, 14
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z4sub3PfS_Pi"
.size .L__unnamed_2, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__add3PfS_Pi
.addrsig_sym _Z19__device_stub__sub3PfS_Pi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4add3PfS_Pi
.addrsig_sym _Z4sub3PfS_Pi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
// Warp- and block-level max-reduction helpers used by the lightseq /
// fastertransformer beam-search kernels. All functions are device-only.
namespace fastertransformer {
// Participation mask selecting all 32 lanes for the *_sync shuffle intrinsics.
const unsigned int WARP_REDUCE_MASK = 0xffffffff;
// Large negative sentinel standing in for -inf when padding unused lanes.
const float CUDA_FLOAT_INF_NEG = -100000000.f;
const unsigned int WARP_SIZE = 32;
// XOR-butterfly reduction: after log2(32) = 5 shuffle rounds, every lane of
// the warp holds the maximum of the warp's 32 input values. T must be a type
// accepted by __shfl_xor_sync.
template <typename T>
__forceinline__ __device__ T warpReduceMax(T val) {
for (int mask = (WARP_SIZE >> 1); mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, WARP_SIZE));
return val;
}
/* Calculate the maximum of all elements in a block */
// Two-stage reduction: each warp reduces its values, lane 0 of every warp
// publishes the partial into shared[], then a final warp-reduce combines the
// partials. shared[32] caps the block at 32 warps (blockDim.x <= 1024).
template <typename T>
__forceinline__ __device__ T blockReduceMax(T val) {
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;  // lane index within the warp
int wid = threadIdx.x >> 5;     // warp index within the block
val = warpReduceMax<T>(val);
if (lane == 0) shared[wid] = val;
__syncthreads();
// Lanes beyond the number of live warps are padded with the -inf sentinel
// ((blockDim.x + 31) >> 5 rounds the warp count up for partial warps).
val = (threadIdx.x < ((blockDim.x + 31) >> 5)) ? shared[lane]
: CUDA_FLOAT_INF_NEG;
val = warpReduceMax<T>(val);
return val;
}
/* Calculate the rough topk-th value in a block, rough but safe */
// Produces an upper bound on the K-th largest value in the block ("rough"
// because it is not exact, "safe" because it never undershoots): max rounds
// down to stride K build per-group maxima, then min rounds across K groups
// take the smallest such maximum.
// NOTE(review): the comment below lists 6 as a valid K, but the mask
// arithmetic (mask >= K, K >> 1) assumes K is a power of two — verify.
template <typename T, int K>
__forceinline__ __device__ T blockRoughTopK(T val) {
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;  // lane index within the warp
int wid = threadIdx.x >> 5;     // warp index within the block
val = warpReduceMax(val);
if (lane == 0) shared[wid] = val;
__syncthreads();
// we do not care about result of threadIdx.x bigger than (blockDim.x >> 5)
val = (threadIdx.x < (blockDim.x >> 5)) ? shared[lane] : CUDA_FLOAT_INF_NEG;
// K should be 2, 4, 6, 8, 16 or 32
for (int mask = 16; mask >= K; mask >>= 1)
val = max(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, 32));
for (int mask = (K >> 1); mask > 0; mask >>= 1)
val = min(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, 32));
return val;
}
}  // namespace fastertransformer
|
code for sm_80
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
namespace fastertransformer {
const unsigned int WARP_REDUCE_MASK = 0xffffffff;
const float CUDA_FLOAT_INF_NEG = -100000000.f;
const unsigned int WARP_SIZE = 32;
template <typename T>
__forceinline__ __device__ T warpReduceMax(T val) {
for (int mask = (WARP_SIZE >> 1); mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, WARP_SIZE));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__forceinline__ __device__ T blockReduceMax(T val) {
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceMax<T>(val);
if (lane == 0) shared[wid] = val;
__syncthreads();
val = (threadIdx.x < ((blockDim.x + 31) >> 5)) ? shared[lane]
: CUDA_FLOAT_INF_NEG;
val = warpReduceMax<T>(val);
return val;
}
/* Calculate the rough topk-th value in a block, rough but safe */
template <typename T, int K>
__forceinline__ __device__ T blockRoughTopK(T val) {
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceMax(val);
if (lane == 0) shared[wid] = val;
__syncthreads();
// we do not care about result of threadIdx.x bigger than (blockDim.x >> 5)
val = (threadIdx.x < (blockDim.x >> 5)) ? shared[lane] : CUDA_FLOAT_INF_NEG;
// K should be 2, 4, 6, 8, 16 or 32
for (int mask = 16; mask >= K; mask >>= 1)
val = max(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, 32));
for (int mask = (K >> 1); mask > 0; mask >>= 1)
val = min(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, 32));
return val;
}
}
|
.file "tmpxft_00024bfc_00000000-6_lightseq_kernels.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2032:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2032:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
namespace fastertransformer {
const unsigned int WARP_REDUCE_MASK = 0xffffffff;
const float CUDA_FLOAT_INF_NEG = -100000000.f;
const unsigned int WARP_SIZE = 32;
template <typename T>
__forceinline__ __device__ T warpReduceMax(T val) {
for (int mask = (WARP_SIZE >> 1); mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, WARP_SIZE));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__forceinline__ __device__ T blockReduceMax(T val) {
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceMax<T>(val);
if (lane == 0) shared[wid] = val;
__syncthreads();
val = (threadIdx.x < ((blockDim.x + 31) >> 5)) ? shared[lane]
: CUDA_FLOAT_INF_NEG;
val = warpReduceMax<T>(val);
return val;
}
/* Calculate the rough topk-th value in a block, rough but safe */
template <typename T, int K>
__forceinline__ __device__ T blockRoughTopK(T val) {
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceMax(val);
if (lane == 0) shared[wid] = val;
__syncthreads();
// we do not care about result of threadIdx.x bigger than (blockDim.x >> 5)
val = (threadIdx.x < (blockDim.x >> 5)) ? shared[lane] : CUDA_FLOAT_INF_NEG;
// K should be 2, 4, 6, 8, 16 or 32
for (int mask = 16; mask >= K; mask >>= 1)
val = max(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, 32));
for (int mask = (K >> 1); mask > 0; mask >>= 1)
val = min(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, 32));
return val;
}
}
|
#include <hip/hip_runtime.h>
// Warp- and block-level max-reduction helpers (HIP port of the lightseq /
// fastertransformer beam-search utilities). All functions are device-only.
namespace fastertransformer {
// Participation mask selecting all 32 lanes for the *_sync shuffle intrinsics.
const unsigned int WARP_REDUCE_MASK = 0xffffffff;
// Large negative sentinel standing in for -inf when padding unused lanes.
const float CUDA_FLOAT_INF_NEG = -100000000.f;
// NOTE(review): hard-coded 32-lane warp; AMD wavefronts can be 64 wide
// (gfx9/CDNA) — confirm the build targets a wave32 architecture.
const unsigned int WARP_SIZE = 32;
// XOR-butterfly reduction: after log2(32) = 5 shuffle rounds, every lane of
// the warp holds the maximum of the warp's 32 input values.
template <typename T>
__forceinline__ __device__ T warpReduceMax(T val) {
for (int mask = (WARP_SIZE >> 1); mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, WARP_SIZE));
return val;
}
/* Calculate the maximum of all elements in a block */
// Two-stage reduction: each warp reduces its values, lane 0 of every warp
// publishes the partial into shared[], then a final warp-reduce combines the
// partials. shared[32] caps the block at 32 warps (blockDim.x <= 1024).
template <typename T>
__forceinline__ __device__ T blockReduceMax(T val) {
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;  // lane index within the warp
int wid = threadIdx.x >> 5;     // warp index within the block
val = warpReduceMax<T>(val);
if (lane == 0) shared[wid] = val;
__syncthreads();
// Lanes beyond the number of live warps are padded with the -inf sentinel
// ((blockDim.x + 31) >> 5 rounds the warp count up for partial warps).
val = (threadIdx.x < ((blockDim.x + 31) >> 5)) ? shared[lane]
: CUDA_FLOAT_INF_NEG;
val = warpReduceMax<T>(val);
return val;
}
/* Calculate the rough topk-th value in a block, rough but safe */
// Produces an upper bound on the K-th largest value in the block ("rough"
// because it is not exact, "safe" because it never undershoots): max rounds
// down to stride K build per-group maxima, then min rounds across K groups
// take the smallest such maximum.
// NOTE(review): the comment below lists 6 as a valid K, but the mask
// arithmetic (mask >= K, K >> 1) assumes K is a power of two — verify.
template <typename T, int K>
__forceinline__ __device__ T blockRoughTopK(T val) {
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;  // lane index within the warp
int wid = threadIdx.x >> 5;     // warp index within the block
val = warpReduceMax(val);
if (lane == 0) shared[wid] = val;
__syncthreads();
// we do not care about result of threadIdx.x bigger than (blockDim.x >> 5)
val = (threadIdx.x < (blockDim.x >> 5)) ? shared[lane] : CUDA_FLOAT_INF_NEG;
// K should be 2, 4, 6, 8, 16 or 32
for (int mask = 16; mask >= K; mask >>= 1)
val = max(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, 32));
for (int mask = (K >> 1); mask > 0; mask >>= 1)
val = min(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, 32));
return val;
}
}  // namespace fastertransformer
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
namespace fastertransformer {
const unsigned int WARP_REDUCE_MASK = 0xffffffff;
const float CUDA_FLOAT_INF_NEG = -100000000.f;
const unsigned int WARP_SIZE = 32;
template <typename T>
__forceinline__ __device__ T warpReduceMax(T val) {
for (int mask = (WARP_SIZE >> 1); mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, WARP_SIZE));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__forceinline__ __device__ T blockReduceMax(T val) {
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceMax<T>(val);
if (lane == 0) shared[wid] = val;
__syncthreads();
val = (threadIdx.x < ((blockDim.x + 31) >> 5)) ? shared[lane]
: CUDA_FLOAT_INF_NEG;
val = warpReduceMax<T>(val);
return val;
}
/* Calculate the rough topk-th value in a block, rough but safe */
template <typename T, int K>
__forceinline__ __device__ T blockRoughTopK(T val) {
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceMax(val);
if (lane == 0) shared[wid] = val;
__syncthreads();
// we do not care about result of threadIdx.x bigger than (blockDim.x >> 5)
val = (threadIdx.x < (blockDim.x >> 5)) ? shared[lane] : CUDA_FLOAT_INF_NEG;
// K should be 2, 4, 6, 8, 16 or 32
for (int mask = 16; mask >= K; mask >>= 1)
val = max(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, 32));
for (int mask = (K >> 1); mask > 0; mask >>= 1)
val = min(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, 32));
return val;
}
}
|
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
namespace fastertransformer {
const unsigned int WARP_REDUCE_MASK = 0xffffffff;
const float CUDA_FLOAT_INF_NEG = -100000000.f;
const unsigned int WARP_SIZE = 32;
template <typename T>
__forceinline__ __device__ T warpReduceMax(T val) {
for (int mask = (WARP_SIZE >> 1); mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, WARP_SIZE));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__forceinline__ __device__ T blockReduceMax(T val) {
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceMax<T>(val);
if (lane == 0) shared[wid] = val;
__syncthreads();
val = (threadIdx.x < ((blockDim.x + 31) >> 5)) ? shared[lane]
: CUDA_FLOAT_INF_NEG;
val = warpReduceMax<T>(val);
return val;
}
/* Calculate the rough topk-th value in a block, rough but safe */
template <typename T, int K>
__forceinline__ __device__ T blockRoughTopK(T val) {
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceMax(val);
if (lane == 0) shared[wid] = val;
__syncthreads();
// we do not care about result of threadIdx.x bigger than (blockDim.x >> 5)
val = (threadIdx.x < (blockDim.x >> 5)) ? shared[lane] : CUDA_FLOAT_INF_NEG;
// K should be 2, 4, 6, 8, 16 or 32
for (int mask = 16; mask >= K; mask >>= 1)
val = max(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, 32));
for (int mask = (K >> 1); mask > 0; mask >>= 1)
val = min(val, __shfl_xor_sync(WARP_REDUCE_MASK, val, mask, 32));
return val;
}
}
|
.text
.file "lightseq_kernels.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
|
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_00024bfc_00000000-6_lightseq_kernels.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2032:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2032:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "lightseq_kernels.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
/*
Babak Poursartip
02/27/2021
CUDA
topic: pinned memory
- Instead of using malloc or new to allocate memory on the CPU (host), we use cudaHostAlloc(). This allocates pinned (page-locked) memory on the host.
- To free the memory, we use cudaFreeHost, instead of delete to deallocate.
- The disadvantage is that you cannot swap the memory to the disk & we need to have enough memory to use this memory.
*/
#include <iostream>
// ==============================
// Benchmarks host<->device copy bandwidth: 64 transfers of 4 MiB each,
// timed with CUDA events.
//   pinned   - allocate the host buffer with cudaHostAlloc (page-locked)
//              instead of pageable new[].
//   toDevice - copy host->device when true, device->host when false.
// Returns the elapsed time in milliseconds for all 64 copies.
// NOTE(review): no CUDA API return code is checked, and `status` is
// assigned but never inspected.
float timing(bool pinned, bool toDevice)
{
std::cout << " p: " << pinned <<" t: " << toDevice << std::endl;
const int count = 1 << 20;             // 1 Mi ints per buffer
const int iterations = 1 << 6;         // 64 timed copies
const int size = count * sizeof(int);  // 4 MiB per copy
cudaEvent_t start, end;
int *d, *h;
float elapsed;
cudaError_t status;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaMalloc(&d, size);
// Pinned (page-locked) memory allows direct DMA; pageable memory goes
// through an intermediate staging buffer.
if(pinned)
cudaHostAlloc(&h, size, cudaHostAllocDefault);
else
h = new int[count];
cudaEventRecord(start);
for (int i = 0; i < iterations; ++i)
{
if (toDevice)
status = cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
else
status = cudaMemcpy(h, d, size, cudaMemcpyDeviceToHost);
}
cudaEventRecord(end);
cudaEventSynchronize(end);  // block until the stop event has completed
cudaEventElapsedTime(&elapsed, start, end);
// Release with the deallocator matching how the host buffer was obtained.
if(pinned)
cudaFreeHost(h);
else
delete[] h;
cudaFree(d);
cudaEventDestroy(start);
cudaEventDestroy(end);
return elapsed;
}
// ==============================
// Runs the copy benchmark for all four combinations of
// {pageable, pinned} x {device->host, host->device} and prints the
// elapsed milliseconds for each.
int main()
{
printf(" starts \n");
std::cout << "From device - paged memory: \t" << timing(false, false) << std::endl;
std::cout << "To device - paged memory: \t" << timing(false, true) << std::endl;
std::cout << "From device - pinned memory: \t" << timing(true, false) << std::endl;
std::cout << "To device - pinned memory: \t" << timing(true, true) << std::endl;
printf(" done \n");
return 0;
}
|
code for sm_80
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
/*
Babak Poursartip
02/27/2021
CUDA
topic: pinned memory
- Instead of using malloc or new to allocation memory on the CPU(host), we use cudaHostAlloc(). This will allocate a pinned memory on the host.
- To free the memory, we use cudaFreeHost, instead of delete to deallocate.
- The disadvantage is that you cannot swap the memory to the disk & we need to have enough memory to use this memory.
*/
#include <iostream>
// ==============================
float timing(bool pinned, bool toDevice)
{
std::cout << " p: " << pinned <<" t: " << toDevice << std::endl;
const int count = 1 << 20;
const int iterations = 1 << 6;
const int size = count * sizeof(int);
cudaEvent_t start, end;
int *d, *h;
float elapsed;
cudaError_t status;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaMalloc(&d, size);
if(pinned)
cudaHostAlloc(&h, size, cudaHostAllocDefault);
else
h = new int[count];
cudaEventRecord(start);
for (int i = 0; i < iterations; ++i)
{
if (toDevice)
status = cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
else
status = cudaMemcpy(h, d, size, cudaMemcpyDeviceToHost);
}
cudaEventRecord(end);
cudaEventSynchronize(end);
cudaEventElapsedTime(&elapsed, start, end);
if(pinned)
cudaFreeHost(h);
else
delete[] h;
cudaFree(d);
cudaEventDestroy(start);
cudaEventDestroy(end);
return elapsed;
}
// ==============================
int main()
{
printf(" starts \n");
std::cout << "From device - paged memory: \t" << timing(false, false) << std::endl;
std::cout << "To device - paged memory: \t" << timing(false, true) << std::endl;
std::cout << "From device - pinned memory: \t" << timing(true, false) << std::endl;
std::cout << "To device - pinned memory: \t" << timing(true, true) << std::endl;
printf(" done \n");
return 0;
}
|
.file "tmpxft_0008c020_00000000-6_1_pinned_memory.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string " p: "
.LC1:
.string " t: "
.text
.globl _Z6timingbb
.type _Z6timingbb, @function
_Z6timingbb:
.LFB3669:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $56, %rsp
.cfi_def_cfa_offset 96
movl %edi, %r12d
movl %esi, %ebx
movl %esi, %ebp
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $4, %edx
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %r13
movq %r13, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movzbl %r12b, %esi
movq %r13, %rdi
call _ZNSo9_M_insertIbEERSoT_@PLT
movq %rax, %r13
movl $4, %edx
leaq .LC1(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movzbl %bl, %esi
movq %r13, %rdi
call _ZNSo9_M_insertIbEERSoT_@PLT
movq %rax, %rbx
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r13
testq %r13, %r13
je .L18
cmpb $0, 56(%r13)
je .L6
movzbl 67(%r13), %eax
.L7:
movsbl %al, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
leaq 8(%rsp), %rdi
call cudaEventCreate@PLT
leaq 16(%rsp), %rdi
call cudaEventCreate@PLT
leaq 24(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
testb %r12b, %r12b
je .L8
leaq 32(%rsp), %rdi
movl $0, %edx
movl $4194304, %esi
call cudaHostAlloc@PLT
.L9:
movl $0, %esi
movq 8(%rsp), %rdi
call cudaEventRecord@PLT
movl $64, %ebx
jmp .L12
.L18:
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L19
call _ZSt16__throw_bad_castv@PLT
.L19:
call __stack_chk_fail@PLT
.L6:
movq %r13, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq 0(%r13), %rax
movl $10, %esi
movq %r13, %rdi
call *48(%rax)
jmp .L7
.L8:
movl $4194304, %edi
call _Znam@PLT
movq %rax, 32(%rsp)
jmp .L9
.L10:
movl $2, %ecx
movl $4194304, %edx
movq 24(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
.L11:
subl $1, %ebx
je .L20
.L12:
testb %bpl, %bpl
je .L10
movl $1, %ecx
movl $4194304, %edx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
jmp .L11
.L20:
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movq 16(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 4(%rsp), %rdi
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
call cudaEventElapsedTime@PLT
testb %r12b, %r12b
jne .L21
movq 32(%rsp), %rdi
testq %rdi, %rdi
je .L14
call _ZdaPv@PLT
.L14:
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaEventDestroy@PLT
movq 16(%rsp), %rdi
call cudaEventDestroy@PLT
movss 4(%rsp), %xmm0
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L22
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
movq 32(%rsp), %rdi
call cudaFreeHost@PLT
jmp .L14
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size _Z6timingbb, .-_Z6timingbb
.section .rodata.str1.1
.LC2:
.string " starts \n"
.LC3:
.string "From device - paged memory: \t"
.LC4:
.string "To device - paged memory: \t"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "From device - pinned memory: \t"
.align 8
.LC6:
.string "To device - pinned memory: \t"
.section .rodata.str1.1
.LC7:
.string " done \n"
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC3(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rbp
movl $0, %esi
movl $0, %edi
call _Z6timingbb
cvtss2sd %xmm0, %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC4(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rbp
movl $1, %esi
movl $0, %edi
call _Z6timingbb
cvtss2sd %xmm0, %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC5(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rbp
movl $0, %esi
movl $1, %edi
call _Z6timingbb
cvtss2sd %xmm0, %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC6(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rbx
movl $1, %esi
movl $1, %edi
call _Z6timingbb
cvtss2sd %xmm0, %xmm0
movq %rbx, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3670:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
/*
Babak Poursartip
02/27/2021
CUDA
topic: pinned memory
- Instead of using malloc or new to allocate memory on the CPU (host), we use cudaHostAlloc(). This allocates pinned (page-locked) memory on the host.
- To free the memory, we use cudaFreeHost, instead of delete to deallocate.
- The disadvantage is that you cannot swap the memory to the disk & we need to have enough memory to use this memory.
*/
#include <iostream>
// ==============================
float timing(bool pinned, bool toDevice)
{
std::cout << " p: " << pinned <<" t: " << toDevice << std::endl;
const int count = 1 << 20;
const int iterations = 1 << 6;
const int size = count * sizeof(int);
cudaEvent_t start, end;
int *d, *h;
float elapsed;
cudaError_t status;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaMalloc(&d, size);
if(pinned)
cudaHostAlloc(&h, size, cudaHostAllocDefault);
else
h = new int[count];
cudaEventRecord(start);
for (int i = 0; i < iterations; ++i)
{
if (toDevice)
status = cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
else
status = cudaMemcpy(h, d, size, cudaMemcpyDeviceToHost);
}
cudaEventRecord(end);
cudaEventSynchronize(end);
cudaEventElapsedTime(&elapsed, start, end);
if(pinned)
cudaFreeHost(h);
else
delete[] h;
cudaFree(d);
cudaEventDestroy(start);
cudaEventDestroy(end);
return elapsed;
}
// ==============================
int main()
{
printf(" starts \n");
std::cout << "From device - paged memory: \t" << timing(false, false) << std::endl;
std::cout << "To device - paged memory: \t" << timing(false, true) << std::endl;
std::cout << "From device - pinned memory: \t" << timing(true, false) << std::endl;
std::cout << "To device - pinned memory: \t" << timing(true, true) << std::endl;
printf(" done \n");
return 0;
}
|
/*
Babak Poursartip
02/27/2021
CUDA
topic: pinned memory
- Instead of using malloc or new to allocation memory on the CPU(host), we use cudaHostAlloc(). This will allocate a pinned memory on the host.
- To free the memory, we use cudaFreeHost, instead of delete to deallocate.
- The disadvantage is that you cannot swap the memory to the disk & we need to have enough memory to use this memory.
*/
#include <hip/hip_runtime.h>
#include <iostream>
// ==============================
// Benchmarks host<->device copy bandwidth (HIP port): 64 transfers of
// 4 MiB each, timed with HIP events.
//   pinned   - allocate the host buffer with hipHostAlloc (page-locked)
//              instead of pageable new[].
//   toDevice - copy host->device when true, device->host when false.
// Returns the elapsed time in milliseconds for all 64 copies.
// NOTE(review): no HIP API return code is checked, and `status` is
// assigned but never inspected.
float timing(bool pinned, bool toDevice)
{
std::cout << " p: " << pinned <<" t: " << toDevice << std::endl;
const int count = 1 << 20;             // 1 Mi ints per buffer
const int iterations = 1 << 6;         // 64 timed copies
const int size = count * sizeof(int);  // 4 MiB per copy
hipEvent_t start, end;
int *d, *h;
float elapsed;
hipError_t status;
hipEventCreate(&start);
hipEventCreate(&end);
hipMalloc(&d, size);
// Pinned (page-locked) memory allows direct DMA; pageable memory goes
// through an intermediate staging buffer.
if(pinned)
hipHostAlloc(&h, size, hipHostMallocDefault);
else
h = new int[count];
hipEventRecord(start);
for (int i = 0; i < iterations; ++i)
{
if (toDevice)
status = hipMemcpy(d, h, size, hipMemcpyHostToDevice);
else
status = hipMemcpy(h, d, size, hipMemcpyDeviceToHost);
}
hipEventRecord(end);
hipEventSynchronize(end);  // block until the stop event has completed
hipEventElapsedTime(&elapsed, start, end);
// Release with the deallocator matching how the host buffer was obtained.
if(pinned)
hipHostFree(h);
else
delete[] h;
hipFree(d);
hipEventDestroy(start);
hipEventDestroy(end);
return elapsed;
}
// ==============================
// Runs the copy benchmark for all four combinations of
// {pageable, pinned} x {device->host, host->device} and prints the
// elapsed milliseconds for each.
int main()
{
printf(" starts \n");
std::cout << "From device - paged memory: \t" << timing(false, false) << std::endl;
std::cout << "To device - paged memory: \t" << timing(false, true) << std::endl;
std::cout << "From device - pinned memory: \t" << timing(true, false) << std::endl;
std::cout << "To device - pinned memory: \t" << timing(true, true) << std::endl;
printf(" done \n");
return 0;
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
/*
Babak Poursartip
02/27/2021
CUDA
topic: pinned memory
- Instead of using malloc or new to allocation memory on the CPU(host), we use cudaHostAlloc(). This will allocate a pinned memory on the host.
- To free the memory, we use cudaFreeHost, instead of delete to deallocate.
- The disadvantage is that you cannot swap the memory to the disk & we need to have enough memory to use this memory.
*/
#include <hip/hip_runtime.h>
#include <iostream>
// ==============================
float timing(bool pinned, bool toDevice)
{
std::cout << " p: " << pinned <<" t: " << toDevice << std::endl;
const int count = 1 << 20;
const int iterations = 1 << 6;
const int size = count * sizeof(int);
hipEvent_t start, end;
int *d, *h;
float elapsed;
hipError_t status;
hipEventCreate(&start);
hipEventCreate(&end);
hipMalloc(&d, size);
if(pinned)
hipHostAlloc(&h, size, hipHostMallocDefault);
else
h = new int[count];
hipEventRecord(start);
for (int i = 0; i < iterations; ++i)
{
if (toDevice)
status = hipMemcpy(d, h, size, hipMemcpyHostToDevice);
else
status = hipMemcpy(h, d, size, hipMemcpyDeviceToHost);
}
hipEventRecord(end);
hipEventSynchronize(end);
hipEventElapsedTime(&elapsed, start, end);
if(pinned)
hipHostFree(h);
else
delete[] h;
hipFree(d);
hipEventDestroy(start);
hipEventDestroy(end);
return elapsed;
}
// ==============================
int main()
{
printf(" starts \n");
std::cout << "From device - paged memory: \t" << timing(false, false) << std::endl;
std::cout << "To device - paged memory: \t" << timing(false, true) << std::endl;
std::cout << "From device - pinned memory: \t" << timing(true, false) << std::endl;
std::cout << "To device - pinned memory: \t" << timing(true, true) << std::endl;
printf(" done \n");
return 0;
}
|
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
/*
Babak Poursartip
02/27/2021
CUDA
topic: pinned memory
- Instead of using malloc or new to allocation memory on the CPU(host), we use cudaHostAlloc(). This will allocate a pinned memory on the host.
- To free the memory, we use cudaFreeHost, instead of delete to deallocate.
- The disadvantage is that you cannot swap the memory to the disk & we need to have enough memory to use this memory.
*/
#include <hip/hip_runtime.h>
#include <iostream>
// ==============================
float timing(bool pinned, bool toDevice)
{
std::cout << " p: " << pinned <<" t: " << toDevice << std::endl;
const int count = 1 << 20;
const int iterations = 1 << 6;
const int size = count * sizeof(int);
hipEvent_t start, end;
int *d, *h;
float elapsed;
hipError_t status;
hipEventCreate(&start);
hipEventCreate(&end);
hipMalloc(&d, size);
if(pinned)
hipHostAlloc(&h, size, hipHostMallocDefault);
else
h = new int[count];
hipEventRecord(start);
for (int i = 0; i < iterations; ++i)
{
if (toDevice)
status = hipMemcpy(d, h, size, hipMemcpyHostToDevice);
else
status = hipMemcpy(h, d, size, hipMemcpyDeviceToHost);
}
hipEventRecord(end);
hipEventSynchronize(end);
hipEventElapsedTime(&elapsed, start, end);
if(pinned)
hipHostFree(h);
else
delete[] h;
hipFree(d);
hipEventDestroy(start);
hipEventDestroy(end);
return elapsed;
}
// ==============================
// Entry point: benchmarks every combination of host-memory kind
// (pageable vs. pinned) and copy direction, printing the elapsed
// time that timing() measured with HIP events.
int main()
{
  printf(" starts \n");

  std::cout << "From device - paged memory: \t";
  std::cout << timing(false, false) << std::endl;

  std::cout << "To device - paged memory: \t";
  std::cout << timing(false, true) << std::endl;

  std::cout << "From device - pinned memory: \t";
  std::cout << timing(true, false) << std::endl;

  std::cout << "To device - pinned memory: \t";
  std::cout << timing(true, true) << std::endl;

  printf(" done \n");
  return 0;
}
|
.text
.file "1_pinned_memory.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z6timingbb # -- Begin function _Z6timingbb
.p2align 4, 0x90
.type _Z6timingbb,@function
_Z6timingbb: # @_Z6timingbb
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $40, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %esi, %ebp
movl %edi, %ebx
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $4, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %ebx, %esi
callq _ZNSo9_M_insertIbEERSoT_
movq %rax, %r14
movl $.L.str.1, %esi
movl $4, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r14, %rdi
movl %ebp, %esi
callq _ZNSo9_M_insertIbEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r14
testq %r14, %r14
je .LBB0_17
# %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r14)
je .LBB0_3
# %bb.2:
movzbl 67(%r14), %ecx
jmp .LBB0_4
.LBB0_3:
movq %r14, %rdi
movq %rax, %r15
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r15, %rax
.LBB0_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
leaq 24(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
leaq 16(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
testb %bl, %bl
je .LBB0_6
# %bb.5:
movq %rsp, %rdi
movl $4194304, %esi # imm = 0x400000
xorl %edx, %edx
callq hipHostAlloc
jmp .LBB0_7
.LBB0_6:
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, (%rsp)
.LBB0_7:
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movl $64, %r14d
jmp .LBB0_8
.p2align 4, 0x90
.LBB0_10: # in Loop: Header=BB0_8 Depth=1
movq (%rsp), %rdi
movq 16(%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movl $2, %ecx
.LBB0_11: # in Loop: Header=BB0_8 Depth=1
callq hipMemcpy
decl %r14d
je .LBB0_12
.LBB0_8: # =>This Inner Loop Header: Depth=1
testb %bpl, %bpl
je .LBB0_10
# %bb.9: # in Loop: Header=BB0_8 Depth=1
movq 16(%rsp), %rdi
movq (%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movl $1, %ecx
jmp .LBB0_11
.LBB0_12:
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 36(%rsp), %rdi
callq hipEventElapsedTime
movq (%rsp), %rdi
testb %bl, %bl
je .LBB0_14
# %bb.13:
callq hipHostFree
jmp .LBB0_16
.LBB0_14:
testq %rdi, %rdi
je .LBB0_16
# %bb.15:
callq _ZdaPv
.LBB0_16:
movq 16(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipEventDestroy
movq 8(%rsp), %rdi
callq hipEventDestroy
movss 36(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
addq $40, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB0_17:
.cfi_def_cfa_offset 80
callq _ZSt16__throw_bad_castv
.Lfunc_end0:
.size _Z6timingbb, .Lfunc_end0-_Z6timingbb
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $.Lstr, %edi
callq puts@PLT
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $29, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
xorl %edi, %edi
xorl %esi, %esi
callq _Z6timingbb
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB1_17
# %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB1_3
# %bb.2:
movzbl 67(%rbx), %ecx
jmp .LBB1_4
.LBB1_3:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB1_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.4, %esi
movl $29, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
xorl %edi, %edi
movl $1, %esi
callq _Z6timingbb
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB1_17
# %bb.5: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i3
cmpb $0, 56(%rbx)
je .LBB1_7
# %bb.6:
movzbl 67(%rbx), %ecx
jmp .LBB1_8
.LBB1_7:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB1_8: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit6
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.5, %esi
movl $30, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $1, %edi
xorl %esi, %esi
callq _Z6timingbb
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB1_17
# %bb.9: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i8
cmpb $0, 56(%rbx)
je .LBB1_11
# %bb.10:
movzbl 67(%rbx), %ecx
jmp .LBB1_12
.LBB1_11:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB1_12: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit11
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.6, %esi
movl $30, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $1, %edi
movl $1, %esi
callq _Z6timingbb
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB1_17
# %bb.13: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i13
cmpb $0, 56(%rbx)
je .LBB1_15
# %bb.14:
movzbl 67(%rbx), %ecx
jmp .LBB1_16
.LBB1_15:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB1_16: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit16
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $.Lstr.1, %edi
callq puts@PLT
xorl %eax, %eax
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB1_17:
.cfi_def_cfa_offset 32
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz " p: "
.size .L.str, 5
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " t: "
.size .L.str.1, 5
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "From device - paged memory: \t"
.size .L.str.3, 30
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "To device - paged memory: \t"
.size .L.str.4, 30
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "From device - pinned memory: \t"
.size .L.str.5, 31
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "To device - pinned memory: \t"
.size .L.str.6, 31
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz " starts "
.size .Lstr, 9
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz " done "
.size .Lstr.1, 7
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
|
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_0008c020_00000000-6_1_pinned_memory.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string " p: "
.LC1:
.string " t: "
.text
.globl _Z6timingbb
.type _Z6timingbb, @function
_Z6timingbb:
.LFB3669:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $56, %rsp
.cfi_def_cfa_offset 96
movl %edi, %r12d
movl %esi, %ebx
movl %esi, %ebp
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $4, %edx
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %r13
movq %r13, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movzbl %r12b, %esi
movq %r13, %rdi
call _ZNSo9_M_insertIbEERSoT_@PLT
movq %rax, %r13
movl $4, %edx
leaq .LC1(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movzbl %bl, %esi
movq %r13, %rdi
call _ZNSo9_M_insertIbEERSoT_@PLT
movq %rax, %rbx
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r13
testq %r13, %r13
je .L18
cmpb $0, 56(%r13)
je .L6
movzbl 67(%r13), %eax
.L7:
movsbl %al, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
leaq 8(%rsp), %rdi
call cudaEventCreate@PLT
leaq 16(%rsp), %rdi
call cudaEventCreate@PLT
leaq 24(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
testb %r12b, %r12b
je .L8
leaq 32(%rsp), %rdi
movl $0, %edx
movl $4194304, %esi
call cudaHostAlloc@PLT
.L9:
movl $0, %esi
movq 8(%rsp), %rdi
call cudaEventRecord@PLT
movl $64, %ebx
jmp .L12
.L18:
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L19
call _ZSt16__throw_bad_castv@PLT
.L19:
call __stack_chk_fail@PLT
.L6:
movq %r13, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq 0(%r13), %rax
movl $10, %esi
movq %r13, %rdi
call *48(%rax)
jmp .L7
.L8:
movl $4194304, %edi
call _Znam@PLT
movq %rax, 32(%rsp)
jmp .L9
.L10:
movl $2, %ecx
movl $4194304, %edx
movq 24(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
.L11:
subl $1, %ebx
je .L20
.L12:
testb %bpl, %bpl
je .L10
movl $1, %ecx
movl $4194304, %edx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
jmp .L11
.L20:
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movq 16(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 4(%rsp), %rdi
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
call cudaEventElapsedTime@PLT
testb %r12b, %r12b
jne .L21
movq 32(%rsp), %rdi
testq %rdi, %rdi
je .L14
call _ZdaPv@PLT
.L14:
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaEventDestroy@PLT
movq 16(%rsp), %rdi
call cudaEventDestroy@PLT
movss 4(%rsp), %xmm0
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L22
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
movq 32(%rsp), %rdi
call cudaFreeHost@PLT
jmp .L14
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size _Z6timingbb, .-_Z6timingbb
.section .rodata.str1.1
.LC2:
.string " starts \n"
.LC3:
.string "From device - paged memory: \t"
.LC4:
.string "To device - paged memory: \t"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "From device - pinned memory: \t"
.align 8
.LC6:
.string "To device - pinned memory: \t"
.section .rodata.str1.1
.LC7:
.string " done \n"
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC3(%rip), %rsi
leaq _ZSt4cout(%rip), %rbx
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rbp
movl $0, %esi
movl $0, %edi
call _Z6timingbb
cvtss2sd %xmm0, %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC4(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rbp
movl $1, %esi
movl $0, %edi
call _Z6timingbb
cvtss2sd %xmm0, %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC5(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rbp
movl $0, %esi
movl $1, %edi
call _Z6timingbb
cvtss2sd %xmm0, %xmm0
movq %rbp, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC6(%rip), %rsi
movq %rbx, %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rbx
movl $1, %esi
movl $1, %edi
call _Z6timingbb
cvtss2sd %xmm0, %xmm0
movq %rbx, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3670:
.size main, .-main
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "1_pinned_memory.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z6timingbb # -- Begin function _Z6timingbb
.p2align 4, 0x90
.type _Z6timingbb,@function
_Z6timingbb: # @_Z6timingbb
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $40, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %esi, %ebp
movl %edi, %ebx
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $4, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %ebx, %esi
callq _ZNSo9_M_insertIbEERSoT_
movq %rax, %r14
movl $.L.str.1, %esi
movl $4, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %r14, %rdi
movl %ebp, %esi
callq _ZNSo9_M_insertIbEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r14
testq %r14, %r14
je .LBB0_17
# %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r14)
je .LBB0_3
# %bb.2:
movzbl 67(%r14), %ecx
jmp .LBB0_4
.LBB0_3:
movq %r14, %rdi
movq %rax, %r15
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r14), %rax
movq %r14, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r15, %rax
.LBB0_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
leaq 24(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
leaq 16(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
testb %bl, %bl
je .LBB0_6
# %bb.5:
movq %rsp, %rdi
movl $4194304, %esi # imm = 0x400000
xorl %edx, %edx
callq hipHostAlloc
jmp .LBB0_7
.LBB0_6:
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, (%rsp)
.LBB0_7:
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movl $64, %r14d
jmp .LBB0_8
.p2align 4, 0x90
.LBB0_10: # in Loop: Header=BB0_8 Depth=1
movq (%rsp), %rdi
movq 16(%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movl $2, %ecx
.LBB0_11: # in Loop: Header=BB0_8 Depth=1
callq hipMemcpy
decl %r14d
je .LBB0_12
.LBB0_8: # =>This Inner Loop Header: Depth=1
testb %bpl, %bpl
je .LBB0_10
# %bb.9: # in Loop: Header=BB0_8 Depth=1
movq 16(%rsp), %rdi
movq (%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movl $1, %ecx
jmp .LBB0_11
.LBB0_12:
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 24(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 36(%rsp), %rdi
callq hipEventElapsedTime
movq (%rsp), %rdi
testb %bl, %bl
je .LBB0_14
# %bb.13:
callq hipHostFree
jmp .LBB0_16
.LBB0_14:
testq %rdi, %rdi
je .LBB0_16
# %bb.15:
callq _ZdaPv
.LBB0_16:
movq 16(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipEventDestroy
movq 8(%rsp), %rdi
callq hipEventDestroy
movss 36(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
addq $40, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB0_17:
.cfi_def_cfa_offset 80
callq _ZSt16__throw_bad_castv
.Lfunc_end0:
.size _Z6timingbb, .Lfunc_end0-_Z6timingbb
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $.Lstr, %edi
callq puts@PLT
movl $_ZSt4cout, %edi
movl $.L.str.3, %esi
movl $29, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
xorl %edi, %edi
xorl %esi, %esi
callq _Z6timingbb
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB1_17
# %bb.1: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB1_3
# %bb.2:
movzbl 67(%rbx), %ecx
jmp .LBB1_4
.LBB1_3:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB1_4: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.4, %esi
movl $29, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
xorl %edi, %edi
movl $1, %esi
callq _Z6timingbb
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB1_17
# %bb.5: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i3
cmpb $0, 56(%rbx)
je .LBB1_7
# %bb.6:
movzbl 67(%rbx), %ecx
jmp .LBB1_8
.LBB1_7:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB1_8: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit6
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.5, %esi
movl $30, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $1, %edi
xorl %esi, %esi
callq _Z6timingbb
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB1_17
# %bb.9: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i8
cmpb $0, 56(%rbx)
je .LBB1_11
# %bb.10:
movzbl 67(%rbx), %ecx
jmp .LBB1_12
.LBB1_11:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB1_12: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit11
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $_ZSt4cout, %edi
movl $.L.str.6, %esi
movl $30, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $1, %edi
movl $1, %esi
callq _Z6timingbb
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %rbx
testq %rbx, %rbx
je .LBB1_17
# %bb.13: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i13
cmpb $0, 56(%rbx)
je .LBB1_15
# %bb.14:
movzbl 67(%rbx), %ecx
jmp .LBB1_16
.LBB1_15:
movq %rbx, %rdi
movq %rax, %r14
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r14, %rax
.LBB1_16: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit16
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movl $.Lstr.1, %edi
callq puts@PLT
xorl %eax, %eax
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB1_17:
.cfi_def_cfa_offset 32
callq _ZSt16__throw_bad_castv
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz " p: "
.size .L.str, 5
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " t: "
.size .L.str.1, 5
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "From device - paged memory: \t"
.size .L.str.3, 30
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "To device - paged memory: \t"
.size .L.str.4, 30
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "From device - pinned memory: \t"
.size .L.str.5, 31
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "To device - pinned memory: \t"
.size .L.str.6, 31
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz " starts "
.size .Lstr, 9
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz " done "
.size .Lstr.1, 7
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#include <cuda.h>
#include <cuda_runtime.h>
// read: single value-0 (50 % access)
// write: single value-1 (50 %access)
// Increments each of the first N/2 elements of p by 1 ("read + write a
// single value", 50% of the buffer touched). Expects a 1-D launch with
// at least N/2 threads; extra threads fall out on the guard.
__global__
void op1(float *p, size_t N) {
  // Widen to size_t before the multiply so the global index cannot wrap
  // at 2^32 on very large launches (the old unsigned math could overflow).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < N / 2) {
    p[idx] = p[idx] + 1;
  }
}
// zero value (25% access)
// Writes zero into the first N/4 elements of p ("zero value", 25% of the
// buffer touched). Expects a 1-D launch with at least N/4 threads.
__global__
void op2(float *p, size_t N) {
  // size_t-widened index: avoids 32-bit wrap on very large launches.
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < N / 4) {
    p[idx] = 0.0f;  // float literal; same stored value as the former 0.0
  }
}
// approximate
// Fills the first N/2 elements of p with 1 + idx/10000 ("approximate"
// pattern). The arithmetic is deliberately kept in double, as before,
// and only narrowed to float on the store.
__global__
void op3(float *p, size_t N) {
  // size_t-widened index: avoids 32-bit wrap on very large launches.
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < N / 2) {
    p[idx] = 1.0 + idx / 10000.0;
  }
}
// dense value (50% access)
// Writes the constant 3 into the first N/2 elements of p ("dense value",
// 50% of the buffer touched). Expects a 1-D launch with >= N/2 threads.
__global__
void op4(int *p, size_t N) {
  // size_t-widened index: avoids 32-bit wrap on very large launches.
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < N / 2) {
    p[idx] = 3;
  }
}
// structured value (50% access)
// Writes idx into p[idx] for every odd idx below N ("structured value",
// half of the covered elements written). The store narrows size_t to int,
// which is exact for the launch sizes used here (N = 1000).
__global__
void op5(int *p, size_t N) {
  // size_t-widened index: avoids 32-bit wrap on very large launches.
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  if (idx % 2 && idx < N) {
    p[idx] = idx;
  }
}
static const size_t N = 1000;
static const int THREADS = 128;
// Entry point: allocates two device buffers and launches the five access
// kernels over N = 1000 elements with 128-thread blocks (8 blocks, 1024
// threads, so every kernel's bounds guard is exercised).
int main() {
  float *p1;
  cudaMalloc(&p1, N * sizeof(float));
  // Bug fix: cudaMemset takes the device pointer itself. The old call
  // passed &p1 — the address of the host-side pointer variable — which is
  // not a device address, so the buffer was never actually cleared.
  cudaMemset(p1, 0, N * sizeof(float));
  auto blocks = (N - 1) / THREADS + 1;  // ceil(N / THREADS)
  op1<<<blocks, THREADS>>>(p1, N);
  op2<<<blocks, THREADS>>>(p1, N);
  op3<<<blocks, THREADS>>>(p1, N);
  int *p2;
  cudaMalloc(&p2, N * sizeof(int));
  op4<<<blocks, THREADS>>>(p2, N);
  op5<<<blocks, THREADS>>>(p2, N);
  cudaFree(p1);
  cudaFree(p2);
  return 0;
}
|
code for sm_80
Function : _Z3op5Pim
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e280000002500 */
/*0020*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R5, R5, c[0x0][0x0], R0 ; /* 0x0000000005057a24 */
/* 0x001fca00078e0200 */
/*0040*/ LOP3.LUT R0, R5.reuse, 0x1, RZ, 0xc0, !PT ; /* 0x0000000105007812 */
/* 0x040fe400078ec0ff */
/*0050*/ ISETP.GE.U32.AND P1, PT, R5, c[0x0][0x168], PT ; /* 0x00005a0005007a0c */
/* 0x000fe40003f26070 */
/*0060*/ ISETP.NE.U32.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fc80003f05070 */
/*0070*/ ISETP.GE.U32.OR.EX P0, PT, RZ, c[0x0][0x16c], P0, P1 ; /* 0x00005b00ff007a0c */
/* 0x000fda0000706510 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ LEA R2, P0, R5, c[0x0][0x160], 0x2 ; /* 0x0000580005027a11 */
/* 0x000fe200078010ff */
/*00a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00b0*/ LEA.HI.X R3, R5, c[0x0][0x164], RZ, 0x2, P0 ; /* 0x0000590005037a11 */
/* 0x000fca00000f14ff */
/*00c0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z3op4Pim
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */
/* 0x000fe40000000a00 */
/*0030*/ USHF.R.U64 UR4, UR4, 0x1, UR5 ; /* 0x0000000104047899 */
/* 0x000fe20008001205 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0050*/ USHF.R.U32.HI UR5, URZ, 0x1, UR5 ; /* 0x000000013f057899 */
/* 0x000fcc0008011605 */
/*0060*/ IMAD.U32 R2, RZ, RZ, UR5 ; /* 0x00000005ff027e24 */
/* 0x000fe4000f8e00ff */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0080*/ ISETP.LT.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fc8000bf01070 */
/*0090*/ ISETP.GT.U32.AND.EX P0, PT, R2, RZ, PT, P0 ; /* 0x000000ff0200720c */
/* 0x000fda0003f04100 */
/*00a0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00b0*/ LEA R2, P0, R0.reuse, c[0x0][0x160], 0x2 ; /* 0x0000580000027a11 */
/* 0x040fe200078010ff */
/*00c0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x3 ; /* 0x00000003ff057424 */
/* 0x000fe200078e00ff */
/*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*00e0*/ LEA.HI.X R3, R0, c[0x0][0x164], RZ, 0x2, P0 ; /* 0x0000590000037a11 */
/* 0x000fca00000f14ff */
/*00f0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0100*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0110*/ BRA 0x110; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z3op3Pfm
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */
/* 0x000fe40000000a00 */
/*0030*/ USHF.R.U64 UR4, UR4, 0x1, UR5 ; /* 0x0000000104047899 */
/* 0x000fe20008001205 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0050*/ USHF.R.U32.HI UR5, URZ, 0x1, UR5 ; /* 0x000000013f057899 */
/* 0x000fcc0008011605 */
/*0060*/ IMAD.U32 R2, RZ, RZ, UR5 ; /* 0x00000005ff027e24 */
/* 0x000fe4000f8e00ff */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0080*/ ISETP.LT.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fc8000bf01070 */
/*0090*/ ISETP.GT.U32.AND.EX P0, PT, R2, RZ, PT, P0 ; /* 0x000000ff0200720c */
/* 0x000fda0003f04100 */
/*00a0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00b0*/ MUFU.RCP64H R3, 10000 ; /* 0x40c3880000037908 */
/* 0x000e220000001800 */
/*00c0*/ IMAD.MOV.U32 R6, RZ, RZ, 0x0 ; /* 0x00000000ff067424 */
/* 0x000fe200078e00ff */
/*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00e0*/ IMAD.MOV.U32 R7, RZ, RZ, 0x40c38800 ; /* 0x40c38800ff077424 */
/* 0x000fe200078e00ff */
/*00f0*/ BSSY B0, 0x210 ; /* 0x0000011000007945 */
/* 0x000fe20003800000 */
/*0100*/ IMAD.MOV.U32 R2, RZ, RZ, 0x1 ; /* 0x00000001ff027424 */
/* 0x000fcc00078e00ff */
/*0110*/ DFMA R4, R2, -R6, 1 ; /* 0x3ff000000204742b */
/* 0x001e0c0000000806 */
/*0120*/ DFMA R4, R4, R4, R4 ; /* 0x000000040404722b */
/* 0x001e0c0000000004 */
/*0130*/ DFMA R2, R2, R4, R2 ; /* 0x000000040202722b */
/* 0x0010480000000002 */
/*0140*/ I2F.F64.U32 R4, R0 ; /* 0x0000000000047312 */
/* 0x001e240000201800 */
/*0150*/ DFMA R6, R2, -R6, 1 ; /* 0x3ff000000206742b */
/* 0x002e4c0000000806 */
/*0160*/ DFMA R6, R2, R6, R2 ; /* 0x000000060206722b */
/* 0x002e0c0000000002 */
/*0170*/ DMUL R2, R4, R6 ; /* 0x0000000604027228 */
/* 0x001e220000000000 */
/*0180*/ FSETP.GEU.AND P1, PT, |R5|, 6.5827683646048100446e-37, PT ; /* 0x036000000500780b */
/* 0x000fca0003f2e200 */
/*0190*/ DFMA R8, R2, -10000, R4 ; /* 0xc0c388000208782b */
/* 0x001e0c0000000004 */
/*01a0*/ DFMA R2, R6, R8, R2 ; /* 0x000000080602722b */
/* 0x001e140000000002 */
/*01b0*/ FFMA R6, RZ, 6.1103515625, R3 ; /* 0x40c38800ff067823 */
/* 0x001fca0000000003 */
/*01c0*/ FSETP.GT.AND P0, PT, |R6|, 1.469367938527859385e-39, PT ; /* 0x001000000600780b */
/* 0x000fda0003f04200 */
/*01d0*/ @P0 BRA P1, 0x200 ; /* 0x0000002000000947 */
/* 0x000fea0000800000 */
/*01e0*/ MOV R6, 0x200 ; /* 0x0000020000067802 */
/* 0x000fe40000000f00 */
/*01f0*/ CALL.REL.NOINC 0x270 ; /* 0x0000007000007944 */
/* 0x000fea0003c00000 */
/*0200*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0210*/ DADD R2, R2, 1 ; /* 0x3ff0000002027429 */
/* 0x000e220000000000 */
/*0220*/ LEA R4, P0, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000047a11 */
/* 0x000fc800078010ff */
/*0230*/ LEA.HI.X R5, R0, c[0x0][0x164], RZ, 0x2, P0 ; /* 0x0000590000057a11 */
/* 0x000fca00000f14ff */
/*0240*/ F2F.F32.F64 R3, R2 ; /* 0x0000000200037310 */
/* 0x001e240000301000 */
/*0250*/ STG.E [R4.64], R3 ; /* 0x0000000304007986 */
/* 0x001fe2000c101904 */
/*0260*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0270*/ IMAD.MOV.U32 R3, RZ, RZ, 0x3ff38800 ; /* 0x3ff38800ff037424 */
/* 0x000fe200078e00ff */
/*0280*/ FSETP.GEU.AND P1, PT, |R5|.reuse, 1.469367938527859385e-39, PT ; /* 0x001000000500780b */
/* 0x040fe20003f2e200 */
/*0290*/ IMAD.MOV.U32 R2, RZ, RZ, 0x0 ; /* 0x00000000ff027424 */
/* 0x000fe200078e00ff */
/*02a0*/ LOP3.LUT R7, R5, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000005077812 */
/* 0x000fe200078ec0ff */
/*02b0*/ MUFU.RCP64H R9, R3 ; /* 0x0000000300097308 */
/* 0x000e220000001800 */
/*02c0*/ IMAD.MOV.U32 R8, RZ, RZ, 0x1 ; /* 0x00000001ff087424 */
/* 0x000fe200078e00ff */
/*02d0*/ BSSY B1, 0x730 ; /* 0x0000045000017945 */
/* 0x000fe20003800000 */
/*02e0*/ IMAD.MOV.U32 R13, RZ, RZ, 0x1ca00000 ; /* 0x1ca00000ff0d7424 */
/* 0x000fe200078e00ff */
/*02f0*/ ISETP.GE.U32.AND P0, PT, R7, 0x40c00000, PT ; /* 0x40c000000700780c */
/* 0x000fe20003f06070 */
/*0300*/ IMAD.MOV.U32 R18, RZ, RZ, R7 ; /* 0x000000ffff127224 */
/* 0x000fc400078e0007 */
/*0310*/ IMAD.MOV.U32 R19, RZ, RZ, 0x40c00000 ; /* 0x40c00000ff137424 */
/* 0x000fe200078e00ff */
/*0320*/ SEL R13, R13, 0x63400000, !P0 ; /* 0x634000000d0d7807 */
/* 0x000fc80004000000 */
/*0330*/ @!P1 LOP3.LUT R12, R13, 0x80000000, R5, 0xf8, !PT ; /* 0x800000000d0c9812 */
/* 0x000fe400078ef805 */
/*0340*/ IADD3 R20, R19, -0x1, RZ ; /* 0xffffffff13147810 */
/* 0x000fe20007ffe0ff */
/*0350*/ DFMA R10, R8, -R2, 1 ; /* 0x3ff00000080a742b */
/* 0x001e0c0000000802 */
/*0360*/ DFMA R10, R10, R10, R10 ; /* 0x0000000a0a0a722b */
/* 0x001e0c000000000a */
/*0370*/ DFMA R14, R8, R10, R8 ; /* 0x0000000a080e722b */
/* 0x0010640000000008 */
/*0380*/ LOP3.LUT R9, R13, 0x800fffff, R5, 0xf8, !PT ; /* 0x800fffff0d097812 */
/* 0x001fe200078ef805 */
/*0390*/ IMAD.MOV.U32 R8, RZ, RZ, R4 ; /* 0x000000ffff087224 */
/* 0x000fe200078e0004 */
/*03a0*/ @!P1 LOP3.LUT R11, R12, 0x100000, RZ, 0xfc, !PT ; /* 0x001000000c0b9812 */
/* 0x000fe200078efcff */
/*03b0*/ @!P1 IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a9224 */
/* 0x000fe200078e00ff */
/*03c0*/ DFMA R16, R14, -R2, 1 ; /* 0x3ff000000e10742b */
/* 0x002e0a0000000802 */
/*03d0*/ @!P1 DFMA R8, R8, 2, -R10 ; /* 0x400000000808982b */
/* 0x000e48000000080a */
/*03e0*/ DFMA R16, R14, R16, R14 ; /* 0x000000100e10722b */
/* 0x001e0c000000000e */
/*03f0*/ @!P1 LOP3.LUT R18, R9, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000009129812 */
/* 0x002fe200078ec0ff */
/*0400*/ DMUL R10, R16, R8 ; /* 0x00000008100a7228 */
/* 0x001e060000000000 */
/*0410*/ IADD3 R12, R18, -0x1, RZ ; /* 0xffffffff120c7810 */
/* 0x000fc60007ffe0ff */
/*0420*/ DFMA R14, R10, -R2, R8 ; /* 0x800000020a0e722b */
/* 0x001e220000000008 */
/*0430*/ ISETP.GT.U32.AND P0, PT, R12, 0x7feffffe, PT ; /* 0x7feffffe0c00780c */
/* 0x000fc80003f04070 */
/*0440*/ ISETP.GT.U32.OR P0, PT, R20, 0x7feffffe, P0 ; /* 0x7feffffe1400780c */
/* 0x000fe20000704470 */
/*0450*/ DFMA R10, R16, R14, R10 ; /* 0x0000000e100a722b */
/* 0x001058000000000a */
/*0460*/ @P0 BRA 0x610 ; /* 0x000001a000000947 */
/* 0x000fea0003800000 */
/*0470*/ IADD3 R7, R7, -0x40c00000, RZ ; /* 0xbf40000007077810 */
/* 0x003fc80007ffe0ff */
/*0480*/ IMNMX R7, R7, -0x46a00000, !PT ; /* 0xb960000007077817 */
/* 0x000fc80007800200 */
/*0490*/ IMNMX R4, R7, 0x46a00000, PT ; /* 0x46a0000007047817 */
/* 0x000fca0003800200 */
/*04a0*/ IMAD.IADD R7, R4, 0x1, -R13 ; /* 0x0000000104077824 */
/* 0x000fe400078e0a0d */
/*04b0*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fc600078e00ff */
/*04c0*/ IADD3 R5, R7, 0x7fe00000, RZ ; /* 0x7fe0000007057810 */
/* 0x000fcc0007ffe0ff */
/*04d0*/ DMUL R12, R10, R4 ; /* 0x000000040a0c7228 */
/* 0x000e140000000000 */
/*04e0*/ FSETP.GTU.AND P0, PT, |R13|, 1.469367938527859385e-39, PT ; /* 0x001000000d00780b */
/* 0x001fda0003f0c200 */
/*04f0*/ @P0 BRA 0x720 ; /* 0x0000022000000947 */
/* 0x000fea0003800000 */
/*0500*/ DFMA R2, R10, -R2, R8 ; /* 0x800000020a02722b */
/* 0x000e220000000008 */
/*0510*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fd200078e00ff */
/*0520*/ FSETP.NEU.AND P0, PT, R3.reuse, RZ, PT ; /* 0x000000ff0300720b */
/* 0x041fe40003f0d000 */
/*0530*/ LOP3.LUT R2, R3, 0x40c38800, RZ, 0x3c, !PT ; /* 0x40c3880003027812 */
/* 0x000fc800078e3cff */
/*0540*/ LOP3.LUT R9, R2, 0x80000000, RZ, 0xc0, !PT ; /* 0x8000000002097812 */
/* 0x000fc800078ec0ff */
/*0550*/ LOP3.LUT R5, R9, R5, RZ, 0xfc, !PT ; /* 0x0000000509057212 */
/* 0x000fc600078efcff */
/*0560*/ @!P0 BRA 0x720 ; /* 0x000001b000008947 */
/* 0x000fea0003800000 */
/*0570*/ IMAD.MOV R3, RZ, RZ, -R7 ; /* 0x000000ffff037224 */
/* 0x000fe200078e0a07 */
/*0580*/ DMUL.RP R4, R10, R4 ; /* 0x000000040a047228 */
/* 0x000e220000008000 */
/*0590*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x000fe200078e00ff */
/*05a0*/ IADD3 R7, -R7, -0x43300000, RZ ; /* 0xbcd0000007077810 */
/* 0x000fca0007ffe1ff */
/*05b0*/ DFMA R2, R12, -R2, R10 ; /* 0x800000020c02722b */
/* 0x000e46000000000a */
/*05c0*/ LOP3.LUT R9, R5, R9, RZ, 0x3c, !PT ; /* 0x0000000905097212 */
/* 0x001fce00078e3cff */
/*05d0*/ FSETP.NEU.AND P0, PT, |R3|, R7, PT ; /* 0x000000070300720b */
/* 0x002fc80003f0d200 */
/*05e0*/ FSEL R12, R4, R12, !P0 ; /* 0x0000000c040c7208 */
/* 0x000fe40004000000 */
/*05f0*/ FSEL R13, R9, R13, !P0 ; /* 0x0000000d090d7208 */
/* 0x000fe20004000000 */
/*0600*/ BRA 0x720 ; /* 0x0000011000007947 */
/* 0x000fea0003800000 */
/*0610*/ DSETP.NAN.AND P0, PT, R4, R4, PT ; /* 0x000000040400722a */
/* 0x003e1c0003f08000 */
/*0620*/ @P0 BRA 0x700 ; /* 0x000000d000000947 */
/* 0x001fea0003800000 */
/*0630*/ ISETP.NE.AND P0, PT, R18, R19, PT ; /* 0x000000131200720c */
/* 0x000fe20003f05270 */
/*0640*/ IMAD.MOV.U32 R12, RZ, RZ, 0x0 ; /* 0x00000000ff0c7424 */
/* 0x000fe400078e00ff */
/*0650*/ IMAD.MOV.U32 R13, RZ, RZ, -0x80000 ; /* 0xfff80000ff0d7424 */
/* 0x000fd400078e00ff */
/*0660*/ @!P0 BRA 0x720 ; /* 0x000000b000008947 */
/* 0x000fea0003800000 */
/*0670*/ ISETP.NE.AND P0, PT, R18, 0x7ff00000, PT ; /* 0x7ff000001200780c */
/* 0x000fe40003f05270 */
/*0680*/ LOP3.LUT R4, R5, 0x40c38800, RZ, 0x3c, !PT ; /* 0x40c3880005047812 */
/* 0x000fe400078e3cff */
/*0690*/ ISETP.EQ.OR P0, PT, R19, RZ, !P0 ; /* 0x000000ff1300720c */
/* 0x000fe40004702670 */
/*06a0*/ LOP3.LUT R13, R4, 0x80000000, RZ, 0xc0, !PT ; /* 0x80000000040d7812 */
/* 0x000fd600078ec0ff */
/*06b0*/ @P0 LOP3.LUT R2, R13, 0x7ff00000, RZ, 0xfc, !PT ; /* 0x7ff000000d020812 */
/* 0x000fe200078efcff */
/*06c0*/ @!P0 IMAD.MOV.U32 R12, RZ, RZ, RZ ; /* 0x000000ffff0c8224 */
/* 0x000fe400078e00ff */
/*06d0*/ @P0 IMAD.MOV.U32 R12, RZ, RZ, RZ ; /* 0x000000ffff0c0224 */
/* 0x000fe400078e00ff */
/*06e0*/ @P0 IMAD.MOV.U32 R13, RZ, RZ, R2 ; /* 0x000000ffff0d0224 */
/* 0x000fe200078e0002 */
/*06f0*/ BRA 0x720 ; /* 0x0000002000007947 */
/* 0x000fea0003800000 */
/*0700*/ LOP3.LUT R13, R5, 0x80000, RZ, 0xfc, !PT ; /* 0x00080000050d7812 */
/* 0x000fe200078efcff */
/*0710*/ IMAD.MOV.U32 R12, RZ, RZ, R4 ; /* 0x000000ffff0c7224 */
/* 0x000fe400078e0004 */
/*0720*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0730*/ IMAD.MOV.U32 R7, RZ, RZ, 0x0 ; /* 0x00000000ff077424 */
/* 0x000fe400078e00ff */
/*0740*/ IMAD.MOV.U32 R2, RZ, RZ, R12 ; /* 0x000000ffff027224 */
/* 0x000fe400078e000c */
/*0750*/ IMAD.MOV.U32 R3, RZ, RZ, R13 ; /* 0x000000ffff037224 */
/* 0x000fe200078e000d */
/*0760*/ RET.REL.NODEC R6 0x0 ; /* 0xfffff89006007950 */
/* 0x000fec0003c3ffff */
/*0770*/ BRA 0x770; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0780*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0790*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*07f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z3op2Pfm
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */
/* 0x000fe40000000a00 */
/*0030*/ USHF.R.U64 UR4, UR4, 0x2, UR5 ; /* 0x0000000204047899 */
/* 0x000fe20008001205 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0050*/ USHF.R.U32.HI UR5, URZ, 0x2, UR5 ; /* 0x000000023f057899 */
/* 0x000fcc0008011605 */
/*0060*/ IMAD.U32 R2, RZ, RZ, UR5 ; /* 0x00000005ff027e24 */
/* 0x000fe4000f8e00ff */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0080*/ ISETP.LT.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fc8000bf01070 */
/*0090*/ ISETP.GT.U32.AND.EX P0, PT, R2, RZ, PT, P0 ; /* 0x000000ff0200720c */
/* 0x000fda0003f04100 */
/*00a0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00b0*/ LEA R2, P0, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000027a11 */
/* 0x000fe200078010ff */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00d0*/ LEA.HI.X R3, R0, c[0x0][0x164], RZ, 0x2, P0 ; /* 0x0000590000037a11 */
/* 0x000fca00000f14ff */
/*00e0*/ STG.E [R2.64], RZ ; /* 0x000000ff02007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z3op1Pfm
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */
/* 0x000fe40000000a00 */
/*0030*/ USHF.R.U64 UR4, UR4, 0x1, UR5 ; /* 0x0000000104047899 */
/* 0x000fe20008001205 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0050*/ USHF.R.U32.HI UR5, URZ, 0x1, UR5 ; /* 0x000000013f057899 */
/* 0x000fcc0008011605 */
/*0060*/ IMAD.U32 R2, RZ, RZ, UR5 ; /* 0x00000005ff027e24 */
/* 0x000fe4000f8e00ff */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0080*/ ISETP.LT.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fc8000bf01070 */
/*0090*/ ISETP.GT.U32.AND.EX P0, PT, R2, RZ, PT, P0 ; /* 0x000000ff0200720c */
/* 0x000fda0003f04100 */
/*00a0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00b0*/ LEA R2, P0, R0, c[0x0][0x160], 0x2 ; /* 0x0000580000027a11 */
/* 0x000fe200078010ff */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*00d0*/ LEA.HI.X R3, R0, c[0x0][0x164], RZ, 0x2, P0 ; /* 0x0000590000037a11 */
/* 0x000fca00000f14ff */
/*00e0*/ LDG.E R0, [R2.64] ; /* 0x0000000402007981 */
/* 0x000ea4000c1e1900 */
/*00f0*/ FADD R5, R0, 1 ; /* 0x3f80000000057421 */
/* 0x004fca0000000000 */
/*0100*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0110*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0120*/ BRA 0x120; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#include <cuda.h>
#include <cuda_runtime.h>
// read: single value-0 (50 % access)
// write: single value-1 (50 %access)
__global__
void op1(float *p, size_t N) {
    // Widen the index computation to 64 bits: blockDim.x * blockIdx.x is a
    // 32-bit (unsigned) product that silently wraps for launches covering
    // more than 2^32 elements, even though idx itself is size_t.
    size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < N / 2) {
        p[idx] = p[idx] + 1;  // read-modify-write on the first half only
    }
}
// zero value (25% access)
__global__
void op2(float *p, size_t N) {
    size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < N / 4) {
        p[idx] = 0.0f;  // float literal avoids a needless double->float convert
    }
}
// approximate
__global__
void op3(float *p, size_t N) {
    size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < N / 2) {
        // Intentionally computed in double (1.0 + idx/10000.0) and narrowed
        // to float on the store, matching the "approximate value" pattern.
        p[idx] = 1.0 + idx / 10000.0;
    }
}
// dense value (50% access)
__global__
void op4(int *p, size_t N) {
    size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < N / 2) {
        p[idx] = 3;
    }
}
// structured value (50% access)
__global__
void op5(int *p, size_t N) {
    size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (idx % 2 && idx < N) {  // odd indices below N only
        p[idx] = idx;          // narrows size_t -> int; fine for small N
    }
}
static const size_t N = 1000;
static const int THREADS = 128;
// Allocates one float and one int device buffer and runs the five
// access-pattern kernels over them with a ceil(N/THREADS) x THREADS launch.
int main() {
    float *p1;
    cudaMalloc(&p1, N * sizeof(float));
    // BUG FIX: the original called cudaMemset(&p1, ...), passing the address
    // of the *host pointer variable* instead of the device buffer, so the
    // allocation was never cleared (the call fails with an invalid-value
    // error). Clear the device memory itself.
    cudaMemset(p1, 0, N * sizeof(float));
    auto blocks = (N - 1) / THREADS + 1;  // ceiling division
    op1<<<blocks, THREADS>>>(p1, N);
    op2<<<blocks, THREADS>>>(p1, N);
    op3<<<blocks, THREADS>>>(p1, N);
    int *p2;
    cudaMalloc(&p2, N * sizeof(int));
    op4<<<blocks, THREADS>>>(p2, N);
    op5<<<blocks, THREADS>>>(p2, N);
    cudaFree(p1);
    cudaFree(p2);
    return 0;
}
|
.file "tmpxft_00083a47_00000000-6_main.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z23__device_stub__Z3op1PfmPfm
.type _Z23__device_stub__Z3op1PfmPfm, @function
_Z23__device_stub__Z3op1PfmPfm:
.LFB2052:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z3op1Pfm(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z23__device_stub__Z3op1PfmPfm, .-_Z23__device_stub__Z3op1PfmPfm
.globl _Z3op1Pfm
.type _Z3op1Pfm, @function
_Z3op1Pfm:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z3op1PfmPfm
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z3op1Pfm, .-_Z3op1Pfm
.globl _Z23__device_stub__Z3op2PfmPfm
.type _Z23__device_stub__Z3op2PfmPfm, @function
_Z23__device_stub__Z3op2PfmPfm:
.LFB2054:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z3op2Pfm(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2054:
.size _Z23__device_stub__Z3op2PfmPfm, .-_Z23__device_stub__Z3op2PfmPfm
.globl _Z3op2Pfm
.type _Z3op2Pfm, @function
_Z3op2Pfm:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z3op2PfmPfm
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _Z3op2Pfm, .-_Z3op2Pfm
.globl _Z23__device_stub__Z3op3PfmPfm
.type _Z23__device_stub__Z3op3PfmPfm, @function
_Z23__device_stub__Z3op3PfmPfm:
.LFB2056:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z3op3Pfm(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2056:
.size _Z23__device_stub__Z3op3PfmPfm, .-_Z23__device_stub__Z3op3PfmPfm
.globl _Z3op3Pfm
.type _Z3op3Pfm, @function
_Z3op3Pfm:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z3op3PfmPfm
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z3op3Pfm, .-_Z3op3Pfm
.globl _Z23__device_stub__Z3op4PimPim
.type _Z23__device_stub__Z3op4PimPim, @function
_Z23__device_stub__Z3op4PimPim:
.LFB2058:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z3op4Pim(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z23__device_stub__Z3op4PimPim, .-_Z23__device_stub__Z3op4PimPim
.globl _Z3op4Pim
.type _Z3op4Pim, @function
_Z3op4Pim:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z3op4PimPim
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z3op4Pim, .-_Z3op4Pim
.globl _Z23__device_stub__Z3op5PimPim
.type _Z23__device_stub__Z3op5PimPim, @function
_Z23__device_stub__Z3op5PimPim:
.LFB2060:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L39
.L35:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z3op5Pim(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L35
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size _Z23__device_stub__Z3op5PimPim, .-_Z23__device_stub__Z3op5PimPim
.globl _Z3op5Pim
.type _Z3op5Pim, @function
_Z3op5Pim:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z3op5PimPim
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _Z3op5Pim, .-_Z3op5Pim
.globl main
.type main, @function
main:
.LFB2027:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $48, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movq %rsp, %rbx
movl $4000, %esi
movq %rbx, %rdi
call cudaMalloc@PLT
movl $4000, %edx
movl $0, %esi
movq %rbx, %rdi
call cudaMemset@PLT
movl $128, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $8, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L51
.L44:
movl $128, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $8, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L52
.L45:
movl $128, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $8, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L53
.L46:
leaq 8(%rsp), %rdi
movl $4000, %esi
call cudaMalloc@PLT
movl $128, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $8, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L54
.L47:
movl $128, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $8, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L55
.L48:
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L56
movl $0, %eax
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L51:
.cfi_restore_state
movl $1000, %esi
movq (%rsp), %rdi
call _Z23__device_stub__Z3op1PfmPfm
jmp .L44
.L52:
movl $1000, %esi
movq (%rsp), %rdi
call _Z23__device_stub__Z3op2PfmPfm
jmp .L45
.L53:
movl $1000, %esi
movq (%rsp), %rdi
call _Z23__device_stub__Z3op3PfmPfm
jmp .L46
.L54:
movl $1000, %esi
movq 8(%rsp), %rdi
call _Z23__device_stub__Z3op4PimPim
jmp .L47
.L55:
movl $1000, %esi
movq 8(%rsp), %rdi
call _Z23__device_stub__Z3op5PimPim
jmp .L48
.L56:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2027:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z3op5Pim"
.LC1:
.string "_Z3op4Pim"
.LC2:
.string "_Z3op3Pfm"
.LC3:
.string "_Z3op2Pfm"
.LC4:
.string "_Z3op1Pfm"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2063:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z3op5Pim(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z3op4Pim(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z3op3Pfm(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z3op2Pfm(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z3op1Pfm(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include <cuda.h>
#include <cuda_runtime.h>
// read: single value-0 (50 % access)
// write: single value-1 (50 %access)
__global__
void op1(float *p, size_t N) {
  // Flat global thread index. The (size_t) cast forces the
  // blockDim.x * blockIdx.x product into 64-bit arithmetic; the original
  // multiplied two unsigned ints, which silently wraps for launches with
  // more than 2^32 total threads before the widening assignment.
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  // read + write the first half of the buffer (50% access)
  if (idx < N / 2) {
    p[idx] = p[idx] + 1.0f;  // float literal: no double promotion
  }
}
// zero value (25% access)
__global__
void op2(float *p, size_t N) {
  // 64-bit index math (original 32-bit multiply could wrap on huge grids).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  // zero-fill the first quarter of the buffer (25% access)
  if (idx < N / 4) {
    p[idx] = 0.0f;  // float literal; original 0.0 converted identically
  }
}
// approximate
__global__
void op3(float *p, size_t N) {
  // 64-bit index math (original 32-bit multiply could wrap on huge grids).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < N / 2) {
    // Intentionally double precision: 1.0 + idx / 10000.0 is evaluated in
    // double and narrowed to float on the store, matching the original.
    p[idx] = 1.0 + idx / 10000.0;
  }
}
// dense value (50% access)
__global__
void op4(int *p, size_t N) {
  // 64-bit index math (original 32-bit multiply could wrap on huge grids).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  // dense constant fill of the first half (50% access)
  if (idx < N / 2) {
    p[idx] = 3;
  }
}
// structured value (50% access)
__global__
void op5(int *p, size_t N) {
  // 64-bit index math (original 32-bit multiply could wrap on huge grids).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  // only odd indices write (structured 50% access pattern)
  if (idx % 2 && idx < N) {
    p[idx] = idx;  // narrowing size_t -> int; fine while N <= INT_MAX
  }
}
static const size_t N = 1000;
static const int THREADS = 128;
int main() {
  float *p1;
  cudaMalloc(&p1, N * sizeof(float));
  // BUG FIX: the original called cudaMemset(&p1, ...), passing the address
  // of the *host* pointer variable. cudaMemset needs the device pointer
  // itself, exactly as the kernels below receive it.
  cudaMemset(p1, 0, N * sizeof(float));
  auto blocks = (N - 1) / THREADS + 1;  // ceil-div launch configuration
  op1<<<blocks, THREADS>>>(p1, N);
  op2<<<blocks, THREADS>>>(p1, N);
  op3<<<blocks, THREADS>>>(p1, N);
  int *p2;
  cudaMalloc(&p2, N * sizeof(int));
  op4<<<blocks, THREADS>>>(p2, N);
  op5<<<blocks, THREADS>>>(p2, N);
  cudaFree(p1);
  cudaFree(p2);
  return 0;
}
|
#include <hip/hip_runtime.h>
// read: single value-0 (50 % access)
// write: single value-1 (50 %access)
__global__
void op1(float *p, size_t N) {
  // Flat global thread index; the (size_t) cast keeps the
  // blockDim.x * blockIdx.x product in 64 bits (the original 32-bit
  // multiply silently wraps past 2^32 total threads).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  // read + write the first half of the buffer (50% access)
  if (idx < N / 2) {
    p[idx] = p[idx] + 1.0f;  // float literal: no double promotion
  }
}
// zero value (25% access)
__global__
void op2(float *p, size_t N) {
  // 64-bit index math (original 32-bit multiply could wrap on huge grids).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  // zero-fill the first quarter of the buffer (25% access)
  if (idx < N / 4) {
    p[idx] = 0.0f;  // float literal; original 0.0 converted identically
  }
}
// approximate
__global__
void op3(float *p, size_t N) {
  // 64-bit index math (original 32-bit multiply could wrap on huge grids).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < N / 2) {
    // Intentionally double precision: evaluated in double, narrowed to
    // float on the store, matching the original semantics.
    p[idx] = 1.0 + idx / 10000.0;
  }
}
// dense value (50% access)
__global__
void op4(int *p, size_t N) {
  // 64-bit index math (original 32-bit multiply could wrap on huge grids).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  // dense constant fill of the first half (50% access)
  if (idx < N / 2) {
    p[idx] = 3;
  }
}
// structured value (50% access)
__global__
void op5(int *p, size_t N) {
  // 64-bit index math (original 32-bit multiply could wrap on huge grids).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  // only odd indices write (structured 50% access pattern)
  if (idx % 2 && idx < N) {
    p[idx] = idx;  // narrowing size_t -> int; fine while N <= INT_MAX
  }
}
static const size_t N = 1000;
static const int THREADS = 128;
int main() {
  float *p1;
  hipMalloc(&p1, N * sizeof(float));
  // BUG FIX: the original called hipMemset(&p1, ...), passing the address
  // of the *host* pointer variable. hipMemset needs the device pointer
  // itself, exactly as the kernels below receive it.
  hipMemset(p1, 0, N * sizeof(float));
  auto blocks = (N - 1) / THREADS + 1;  // ceil-div launch configuration
  op1<<<blocks, THREADS>>>(p1, N);
  op2<<<blocks, THREADS>>>(p1, N);
  op3<<<blocks, THREADS>>>(p1, N);
  int *p2;
  hipMalloc(&p2, N * sizeof(int));
  op4<<<blocks, THREADS>>>(p2, N);
  op5<<<blocks, THREADS>>>(p2, N);
  hipFree(p1);
  hipFree(p2);
  return 0;
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
// read: single value-0 (50 % access)
// write: single value-1 (50 %access)
__global__
void op1(float *p, size_t N) {
  // Flat global thread index; the (size_t) cast keeps the
  // blockDim.x * blockIdx.x product in 64 bits (the original 32-bit
  // multiply silently wraps past 2^32 total threads).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  // read + write the first half of the buffer (50% access)
  if (idx < N / 2) {
    p[idx] = p[idx] + 1.0f;  // float literal: no double promotion
  }
}
// zero value (25% access)
__global__
void op2(float *p, size_t N) {
  // 64-bit index math (original 32-bit multiply could wrap on huge grids).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  // zero-fill the first quarter of the buffer (25% access)
  if (idx < N / 4) {
    p[idx] = 0.0f;  // float literal; original 0.0 converted identically
  }
}
// approximate
__global__
void op3(float *p, size_t N) {
  // 64-bit index math (original 32-bit multiply could wrap on huge grids).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < N / 2) {
    // Intentionally double precision: evaluated in double, narrowed to
    // float on the store, matching the original semantics.
    p[idx] = 1.0 + idx / 10000.0;
  }
}
// dense value (50% access)
__global__
void op4(int *p, size_t N) {
  // 64-bit index math (original 32-bit multiply could wrap on huge grids).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  // dense constant fill of the first half (50% access)
  if (idx < N / 2) {
    p[idx] = 3;
  }
}
// structured value (50% access)
__global__
void op5(int *p, size_t N) {
  // 64-bit index math (original 32-bit multiply could wrap on huge grids).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  // only odd indices write (structured 50% access pattern)
  if (idx % 2 && idx < N) {
    p[idx] = idx;  // narrowing size_t -> int; fine while N <= INT_MAX
  }
}
static const size_t N = 1000;
static const int THREADS = 128;
int main() {
  float *p1;
  hipMalloc(&p1, N * sizeof(float));
  // BUG FIX: the original called hipMemset(&p1, ...), passing the address
  // of the *host* pointer variable. hipMemset needs the device pointer
  // itself, exactly as the kernels below receive it.
  hipMemset(p1, 0, N * sizeof(float));
  auto blocks = (N - 1) / THREADS + 1;  // ceil-div launch configuration
  op1<<<blocks, THREADS>>>(p1, N);
  op2<<<blocks, THREADS>>>(p1, N);
  op3<<<blocks, THREADS>>>(p1, N);
  int *p2;
  hipMalloc(&p2, N * sizeof(int));
  op4<<<blocks, THREADS>>>(p2, N);
  op5<<<blocks, THREADS>>>(p2, N);
  hipFree(p1);
  hipFree(p2);
  return 0;
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3op1Pfm
.globl _Z3op1Pfm
.p2align 8
.type _Z3op1Pfm,@function
_Z3op1Pfm:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b64 s[2:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_lshr_b64 s[2:3], s[2:3], 1
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_u64_e32 vcc_lo, s[2:3], v[1:2]
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_2
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, 1.0, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3op1Pfm
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3op1Pfm, .Lfunc_end0-_Z3op1Pfm
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z3op2Pfm
.globl _Z3op2Pfm
.p2align 8
.type _Z3op2Pfm,@function
_Z3op2Pfm:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b64 s[2:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_lshr_b64 s[2:3], s[2:3], 2
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_u64_e32 vcc_lo, s[2:3], v[1:2]
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB1_2
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB1_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3op2Pfm
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z3op2Pfm, .Lfunc_end1-_Z3op2Pfm
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z3op3Pfm
.globl _Z3op3Pfm
.p2align 8
.type _Z3op3Pfm,@function
_Z3op3Pfm:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b64 s[2:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_lshr_b64 s[2:3], s[2:3], 1
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_u64_e32 vcc_lo, s[2:3], v[1:2]
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB2_2
v_cvt_f64_u32_e32 v[3:4], v1
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_scale_f64 v[5:6], null, 0x40c38800, 0x40c38800, v[3:4]
v_rcp_f64_e32 v[7:8], v[5:6]
s_waitcnt_depctr 0xfff
v_fma_f64 v[9:10], -v[5:6], v[7:8], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[7:8], v[7:8], v[9:10], v[7:8]
v_fma_f64 v[9:10], -v[5:6], v[7:8], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fma_f64 v[7:8], v[7:8], v[9:10], v[7:8]
v_div_scale_f64 v[9:10], vcc_lo, v[3:4], 0x40c38800, v[3:4]
v_mul_f64 v[11:12], v[9:10], v[7:8]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[5:6], -v[5:6], v[11:12], v[9:10]
v_div_fmas_f64 v[5:6], v[5:6], v[7:8], v[11:12]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fixup_f64 v[3:4], v[5:6], 0x40c38800, v[3:4]
v_add_f64 v[3:4], v[3:4], 1.0
s_delay_alu instid0(VALU_DEP_1)
v_cvt_f32_f64_e32 v3, v[3:4]
global_store_b32 v[0:1], v3, off
.LBB2_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3op3Pfm
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 13
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z3op3Pfm, .Lfunc_end2-_Z3op3Pfm
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z3op4Pim
.globl _Z3op4Pim
.p2align 8
.type _Z3op4Pim,@function
_Z3op4Pim:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b64 s[2:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_lshr_b64 s[2:3], s[2:3], 1
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_u64_e32 vcc_lo, s[2:3], v[1:2]
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB3_2
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_mov_b32_e32 v2, 3
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB3_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3op4Pim
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z3op4Pim, .Lfunc_end3-_Z3op4Pim
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z3op5Pim
.globl _Z3op5Pim
.p2align 8
.type _Z3op5Pim,@function
_Z3op5Pim:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b64 s[2:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_mov_b32_e32 v2, 0
v_and_b32_e32 v0, 1, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_u64_e32 vcc_lo, s[2:3], v[1:2]
v_cmp_eq_u32_e64 s2, 1, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB4_2
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
global_store_b32 v[2:3], v1, off
.LBB4_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3op5Pim
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end4:
.size _Z3op5Pim, .Lfunc_end4-_Z3op5Pim
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3op1Pfm
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3op1Pfm.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3op2Pfm
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3op2Pfm.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3op3Pfm
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3op3Pfm.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 13
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3op4Pim
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3op4Pim.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 8
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3op5Pim
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3op5Pim.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
// read: single value-0 (50 % access)
// write: single value-1 (50 %access)
__global__
void op1(float *p, size_t N) {
  // Flat global thread index; the (size_t) cast keeps the
  // blockDim.x * blockIdx.x product in 64 bits (the original 32-bit
  // multiply silently wraps past 2^32 total threads).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  // read + write the first half of the buffer (50% access)
  if (idx < N / 2) {
    p[idx] = p[idx] + 1.0f;  // float literal: no double promotion
  }
}
// zero value (25% access)
__global__
void op2(float *p, size_t N) {
  // 64-bit index math (original 32-bit multiply could wrap on huge grids).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  // zero-fill the first quarter of the buffer (25% access)
  if (idx < N / 4) {
    p[idx] = 0.0f;  // float literal; original 0.0 converted identically
  }
}
// approximate
__global__
void op3(float *p, size_t N) {
  // 64-bit index math (original 32-bit multiply could wrap on huge grids).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  if (idx < N / 2) {
    // Intentionally double precision: evaluated in double, narrowed to
    // float on the store, matching the original semantics.
    p[idx] = 1.0 + idx / 10000.0;
  }
}
// dense value (50% access)
__global__
void op4(int *p, size_t N) {
  // 64-bit index math (original 32-bit multiply could wrap on huge grids).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  // dense constant fill of the first half (50% access)
  if (idx < N / 2) {
    p[idx] = 3;
  }
}
// structured value (50% access)
__global__
void op5(int *p, size_t N) {
  // 64-bit index math (original 32-bit multiply could wrap on huge grids).
  size_t idx = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
  // only odd indices write (structured 50% access pattern)
  if (idx % 2 && idx < N) {
    p[idx] = idx;  // narrowing size_t -> int; fine while N <= INT_MAX
  }
}
static const size_t N = 1000;
static const int THREADS = 128;
int main() {
  float *p1;
  hipMalloc(&p1, N * sizeof(float));
  // BUG FIX: the original called hipMemset(&p1, ...), passing the address
  // of the *host* pointer variable. hipMemset needs the device pointer
  // itself, exactly as the kernels below receive it.
  hipMemset(p1, 0, N * sizeof(float));
  auto blocks = (N - 1) / THREADS + 1;  // ceil-div launch configuration
  op1<<<blocks, THREADS>>>(p1, N);
  op2<<<blocks, THREADS>>>(p1, N);
  op3<<<blocks, THREADS>>>(p1, N);
  int *p2;
  hipMalloc(&p2, N * sizeof(int));
  op4<<<blocks, THREADS>>>(p2, N);
  op5<<<blocks, THREADS>>>(p2, N);
  hipFree(p1);
  hipFree(p2);
  return 0;
}
|
.text
.file "main.hip"
.globl _Z18__device_stub__op1Pfm # -- Begin function _Z18__device_stub__op1Pfm
.p2align 4, 0x90
.type _Z18__device_stub__op1Pfm,@function
_Z18__device_stub__op1Pfm: # @_Z18__device_stub__op1Pfm
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op1Pfm, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z18__device_stub__op1Pfm, .Lfunc_end0-_Z18__device_stub__op1Pfm
.cfi_endproc
# -- End function
.globl _Z18__device_stub__op2Pfm # -- Begin function _Z18__device_stub__op2Pfm
.p2align 4, 0x90
.type _Z18__device_stub__op2Pfm,@function
_Z18__device_stub__op2Pfm: # @_Z18__device_stub__op2Pfm
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op2Pfm, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z18__device_stub__op2Pfm, .Lfunc_end1-_Z18__device_stub__op2Pfm
.cfi_endproc
# -- End function
.globl _Z18__device_stub__op3Pfm # -- Begin function _Z18__device_stub__op3Pfm
.p2align 4, 0x90
.type _Z18__device_stub__op3Pfm,@function
_Z18__device_stub__op3Pfm: # @_Z18__device_stub__op3Pfm
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op3Pfm, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end2:
.size _Z18__device_stub__op3Pfm, .Lfunc_end2-_Z18__device_stub__op3Pfm
.cfi_endproc
# -- End function
.globl _Z18__device_stub__op4Pim # -- Begin function _Z18__device_stub__op4Pim
.p2align 4, 0x90
.type _Z18__device_stub__op4Pim,@function
_Z18__device_stub__op4Pim: # @_Z18__device_stub__op4Pim
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op4Pim, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end3:
.size _Z18__device_stub__op4Pim, .Lfunc_end3-_Z18__device_stub__op4Pim
.cfi_endproc
# -- End function
.globl _Z18__device_stub__op5Pim # -- Begin function _Z18__device_stub__op5Pim
.p2align 4, 0x90
.type _Z18__device_stub__op5Pim,@function
_Z18__device_stub__op5Pim: # @_Z18__device_stub__op5Pim
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op5Pim, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end4:
.size _Z18__device_stub__op5Pim, .Lfunc_end4-_Z18__device_stub__op5Pim
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $104, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movabsq $4294967304, %rbx # imm = 0x100000008
leaq 88(%rsp), %r14
movl $4000, %esi # imm = 0xFA0
movq %r14, %rdi
callq hipMalloc
movl $4000, %edx # imm = 0xFA0
movq %r14, %rdi
xorl %esi, %esi
callq hipMemset
leaq 120(%rbx), %r14
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_2
# %bb.1:
movq 88(%rsp), %rax
movq %rax, 56(%rsp)
movq $1000, 48(%rsp) # imm = 0x3E8
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op1Pfm, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_2:
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_4
# %bb.3:
movq 88(%rsp), %rax
movq %rax, 56(%rsp)
movq $1000, 48(%rsp) # imm = 0x3E8
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op2Pfm, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_4:
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_6
# %bb.5:
movq 88(%rsp), %rax
movq %rax, 56(%rsp)
movq $1000, 48(%rsp) # imm = 0x3E8
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op3Pfm, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_6:
leaq 96(%rsp), %rdi
movl $4000, %esi # imm = 0xFA0
callq hipMalloc
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_8
# %bb.7:
movq 96(%rsp), %rax
movq %rax, 56(%rsp)
movq $1000, 48(%rsp) # imm = 0x3E8
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op4Pim, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_8:
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_10
# %bb.9:
movq 96(%rsp), %rax
movq %rax, 56(%rsp)
movq $1000, 48(%rsp) # imm = 0x3E8
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op5Pim, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_10:
movq 88(%rsp), %rdi
callq hipFree
movq 96(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $104, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3op1Pfm, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3op2Pfm, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3op3Pfm, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3op4Pim, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3op5Pim, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3op1Pfm,@object # @_Z3op1Pfm
.section .rodata,"a",@progbits
.globl _Z3op1Pfm
.p2align 3, 0x0
_Z3op1Pfm:
.quad _Z18__device_stub__op1Pfm
.size _Z3op1Pfm, 8
.type _Z3op2Pfm,@object # @_Z3op2Pfm
.globl _Z3op2Pfm
.p2align 3, 0x0
_Z3op2Pfm:
.quad _Z18__device_stub__op2Pfm
.size _Z3op2Pfm, 8
.type _Z3op3Pfm,@object # @_Z3op3Pfm
.globl _Z3op3Pfm
.p2align 3, 0x0
_Z3op3Pfm:
.quad _Z18__device_stub__op3Pfm
.size _Z3op3Pfm, 8
.type _Z3op4Pim,@object # @_Z3op4Pim
.globl _Z3op4Pim
.p2align 3, 0x0
_Z3op4Pim:
.quad _Z18__device_stub__op4Pim
.size _Z3op4Pim, 8
.type _Z3op5Pim,@object # @_Z3op5Pim
.globl _Z3op5Pim
.p2align 3, 0x0
_Z3op5Pim:
.quad _Z18__device_stub__op5Pim
.size _Z3op5Pim, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z3op1Pfm"
.size .L__unnamed_1, 10
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z3op2Pfm"
.size .L__unnamed_2, 10
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z3op3Pfm"
.size .L__unnamed_3, 10
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z3op4Pim"
.size .L__unnamed_4, 10
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "_Z3op5Pim"
.size .L__unnamed_5, 10
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__op1Pfm
.addrsig_sym _Z18__device_stub__op2Pfm
.addrsig_sym _Z18__device_stub__op3Pfm
.addrsig_sym _Z18__device_stub__op4Pim
.addrsig_sym _Z18__device_stub__op5Pim
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3op1Pfm
.addrsig_sym _Z3op2Pfm
.addrsig_sym _Z3op3Pfm
.addrsig_sym _Z3op4Pim
.addrsig_sym _Z3op5Pim
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_00083a47_00000000-6_main.cudafe1.cpp"
.text
#APP
#NO_APP
# Internal cleanup helper (registered via atexit by __sti____cudaRegisterAll):
# unregisters this module's fatbinary with the CUDA runtime at process exit.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp                                  # keep 16-byte stack alignment for the call
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi     # arg0 = handle cached at registration time
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# Host-side launch stub for kernel op1(float*, size_t), emitted by nvcc.
# It spills the two kernel arguments to the stack, builds the argument-pointer
# array, pops the <<<grid,block,smem,stream>>> configuration pushed by the
# caller, and forwards everything to cudaLaunchKernel. Protected by a stack
# canary (%fs:40) because the argument buffer lives on the stack.
.globl _Z23__device_stub__Z3op1PfmPfm
.type _Z23__device_stub__Z3op1PfmPfm, @function
_Z23__device_stub__Z3op1PfmPfm:
.LFB2052:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)                   # spill kernel arg0 (float* A)
movq %rsi, (%rsp)                    # spill kernel arg1 (size_t n)
movq %fs:40, %rax                    # install stack-protector canary
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax                   # args[0] = &arg0
movq %rax, 80(%rsp)
movq %rsp, %rax                      # args[1] = &arg1
movq %rax, 88(%rsp)
movl $1, 32(%rsp)                    # default-initialize dim3 gridDim/blockDim to (1,1,1);
movl $1, 36(%rsp)                    # __cudaPopCallConfiguration overwrites with real values
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx                  # out: stream
leaq 16(%rsp), %rdx                  # out: shared-memory size
leaq 44(%rsp), %rsi                  # out: blockDim
leaq 32(%rsp), %rdi                  # out: gridDim
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7                               # 0 == success -> actually launch
.L3:
movq 104(%rsp), %rax                 # verify canary before returning
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)                       # stack args for cudaLaunchKernel: stream
.cfi_def_cfa_offset 136
pushq 24(%rsp)                       # shared-memory bytes
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9                   # r9  = kernel argument array
movq 60(%rsp), %rcx                  # rcx = blockDim (packed dim3)
movl 68(%rsp), %r8d                  # r8d = blockDim.z
movq 48(%rsp), %rsi                  # rsi = gridDim (packed dim3)
movl 56(%rsp), %edx                  # edx = gridDim.z
leaq _Z3op1Pfm(%rip), %rdi           # rdi = host function used as kernel key
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT            # canary mismatch -> abort
.cfi_endproc
.LFE2052:
.size _Z23__device_stub__Z3op1PfmPfm, .-_Z23__device_stub__Z3op1PfmPfm
# Public host entry point for kernel op1(float*, size_t): a thin tail wrapper
# that forwards its register arguments unchanged to the launch stub above.
# Its address also serves as the registration key passed to
# __cudaRegisterFunction / cudaLaunchKernel.
.globl _Z3op1Pfm
.type _Z3op1Pfm, @function
_Z3op1Pfm:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp                        # re-align stack for the nested call
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z3op1PfmPfm
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z3op1Pfm, .-_Z3op1Pfm
.globl _Z23__device_stub__Z3op2PfmPfm
.type _Z23__device_stub__Z3op2PfmPfm, @function
_Z23__device_stub__Z3op2PfmPfm:
.LFB2054:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z3op2Pfm(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2054:
.size _Z23__device_stub__Z3op2PfmPfm, .-_Z23__device_stub__Z3op2PfmPfm
.globl _Z3op2Pfm
.type _Z3op2Pfm, @function
_Z3op2Pfm:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z3op2PfmPfm
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _Z3op2Pfm, .-_Z3op2Pfm
.globl _Z23__device_stub__Z3op3PfmPfm
.type _Z23__device_stub__Z3op3PfmPfm, @function
_Z23__device_stub__Z3op3PfmPfm:
.LFB2056:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z3op3Pfm(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2056:
.size _Z23__device_stub__Z3op3PfmPfm, .-_Z23__device_stub__Z3op3PfmPfm
.globl _Z3op3Pfm
.type _Z3op3Pfm, @function
_Z3op3Pfm:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z3op3PfmPfm
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z3op3Pfm, .-_Z3op3Pfm
.globl _Z23__device_stub__Z3op4PimPim
.type _Z23__device_stub__Z3op4PimPim, @function
_Z23__device_stub__Z3op4PimPim:
.LFB2058:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z3op4Pim(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z23__device_stub__Z3op4PimPim, .-_Z23__device_stub__Z3op4PimPim
.globl _Z3op4Pim
.type _Z3op4Pim, @function
_Z3op4Pim:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z3op4PimPim
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z3op4Pim, .-_Z3op4Pim
.globl _Z23__device_stub__Z3op5PimPim
.type _Z23__device_stub__Z3op5PimPim, @function
_Z23__device_stub__Z3op5PimPim:
.LFB2060:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L39
.L35:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z3op5Pim(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L35
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size _Z23__device_stub__Z3op5PimPim, .-_Z23__device_stub__Z3op5PimPim
.globl _Z3op5Pim
.type _Z3op5Pim, @function
_Z3op5Pim:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z3op5PimPim
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _Z3op5Pim, .-_Z3op5Pim
.globl main
.type main, @function
main:
.LFB2027:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $48, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movq %rsp, %rbx
movl $4000, %esi
movq %rbx, %rdi
call cudaMalloc@PLT
movl $4000, %edx
movl $0, %esi
movq %rbx, %rdi
call cudaMemset@PLT
movl $128, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $8, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L51
.L44:
movl $128, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $8, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L52
.L45:
movl $128, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $8, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L53
.L46:
leaq 8(%rsp), %rdi
movl $4000, %esi
call cudaMalloc@PLT
movl $128, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $8, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L54
.L47:
movl $128, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $8, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L55
.L48:
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L56
movl $0, %eax
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
ret
.L51:
.cfi_restore_state
movl $1000, %esi
movq (%rsp), %rdi
call _Z23__device_stub__Z3op1PfmPfm
jmp .L44
.L52:
movl $1000, %esi
movq (%rsp), %rdi
call _Z23__device_stub__Z3op2PfmPfm
jmp .L45
.L53:
movl $1000, %esi
movq (%rsp), %rdi
call _Z23__device_stub__Z3op3PfmPfm
jmp .L46
.L54:
movl $1000, %esi
movq 8(%rsp), %rdi
call _Z23__device_stub__Z3op4PimPim
jmp .L47
.L55:
movl $1000, %esi
movq 8(%rsp), %rdi
call _Z23__device_stub__Z3op5PimPim
jmp .L48
.L56:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2027:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z3op5Pim"
.LC1:
.string "_Z3op4Pim"
.LC2:
.string "_Z3op3Pfm"
.LC3:
.string "_Z3op2Pfm"
.LC4:
.string "_Z3op1Pfm"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2063:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z3op5Pim(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z3op4Pim(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z3op3Pfm(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z3op2Pfm(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z3op1Pfm(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "main.hip"
# HIP (clang-emitted) launch stub for kernel op1(float*, size_t) — the AMD
# counterpart of the nvcc __device_stub above: spill the two kernel args,
# build the argument-pointer array, pop the launch configuration, and call
# hipLaunchKernel with the host symbol _Z3op1Pfm as the kernel key.
.globl _Z18__device_stub__op1Pfm # -- Begin function _Z18__device_stub__op1Pfm
.p2align 4, 0x90
.type _Z18__device_stub__op1Pfm,@function
_Z18__device_stub__op1Pfm: # @_Z18__device_stub__op1Pfm
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)                  # spill kernel arg0 (float* A)
movq %rsi, 48(%rsp)                  # spill kernel arg1 (size_t n)
leaq 56(%rsp), %rax                  # args[0] = &arg0
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax                  # args[1] = &arg1
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi                  # out: gridDim
leaq 16(%rsp), %rsi                  # out: blockDim
leaq 8(%rsp), %rdx                   # out: shared-memory size
movq %rsp, %rcx                      # out: stream
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi                  # gridDim (packed)
movl 40(%rsp), %edx                  # gridDim.z
movq 16(%rsp), %rcx                  # blockDim (packed)
movl 24(%rsp), %r8d                  # blockDim.z
leaq 64(%rsp), %r9                   # kernel argument array
movl $_Z3op1Pfm, %edi                # host symbol used as kernel key
pushq (%rsp)                         # stream
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)                       # shared-memory bytes
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z18__device_stub__op1Pfm, .Lfunc_end0-_Z18__device_stub__op1Pfm
.cfi_endproc
# -- End function
.globl _Z18__device_stub__op2Pfm # -- Begin function _Z18__device_stub__op2Pfm
.p2align 4, 0x90
.type _Z18__device_stub__op2Pfm,@function
_Z18__device_stub__op2Pfm: # @_Z18__device_stub__op2Pfm
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op2Pfm, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end1:
.size _Z18__device_stub__op2Pfm, .Lfunc_end1-_Z18__device_stub__op2Pfm
.cfi_endproc
# -- End function
.globl _Z18__device_stub__op3Pfm # -- Begin function _Z18__device_stub__op3Pfm
.p2align 4, 0x90
.type _Z18__device_stub__op3Pfm,@function
_Z18__device_stub__op3Pfm: # @_Z18__device_stub__op3Pfm
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op3Pfm, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end2:
.size _Z18__device_stub__op3Pfm, .Lfunc_end2-_Z18__device_stub__op3Pfm
.cfi_endproc
# -- End function
.globl _Z18__device_stub__op4Pim # -- Begin function _Z18__device_stub__op4Pim
.p2align 4, 0x90
.type _Z18__device_stub__op4Pim,@function
_Z18__device_stub__op4Pim: # @_Z18__device_stub__op4Pim
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op4Pim, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end3:
.size _Z18__device_stub__op4Pim, .Lfunc_end3-_Z18__device_stub__op4Pim
.cfi_endproc
# -- End function
.globl _Z18__device_stub__op5Pim # -- Begin function _Z18__device_stub__op5Pim
.p2align 4, 0x90
.type _Z18__device_stub__op5Pim,@function
_Z18__device_stub__op5Pim: # @_Z18__device_stub__op5Pim
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op5Pim, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end4:
.size _Z18__device_stub__op5Pim, .Lfunc_end4-_Z18__device_stub__op5Pim
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $104, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movabsq $4294967304, %rbx # imm = 0x100000008
leaq 88(%rsp), %r14
movl $4000, %esi # imm = 0xFA0
movq %r14, %rdi
callq hipMalloc
movl $4000, %edx # imm = 0xFA0
movq %r14, %rdi
xorl %esi, %esi
callq hipMemset
leaq 120(%rbx), %r14
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_2
# %bb.1:
movq 88(%rsp), %rax
movq %rax, 56(%rsp)
movq $1000, 48(%rsp) # imm = 0x3E8
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op1Pfm, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_2:
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_4
# %bb.3:
movq 88(%rsp), %rax
movq %rax, 56(%rsp)
movq $1000, 48(%rsp) # imm = 0x3E8
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op2Pfm, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_4:
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_6
# %bb.5:
movq 88(%rsp), %rax
movq %rax, 56(%rsp)
movq $1000, 48(%rsp) # imm = 0x3E8
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op3Pfm, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_6:
leaq 96(%rsp), %rdi
movl $4000, %esi # imm = 0xFA0
callq hipMalloc
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_8
# %bb.7:
movq 96(%rsp), %rax
movq %rax, 56(%rsp)
movq $1000, 48(%rsp) # imm = 0x3E8
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op4Pim, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_8:
movq %rbx, %rdi
movl $1, %esi
movq %r14, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_10
# %bb.9:
movq 96(%rsp), %rax
movq %rax, 56(%rsp)
movq $1000, 48(%rsp) # imm = 0x3E8
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z3op5Pim, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_10:
movq 88(%rsp), %rdi
callq hipFree
movq 96(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $104, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3op1Pfm, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3op2Pfm, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3op3Pfm, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3op4Pim, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3op5Pim, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
# Module destructor registered via atexit by __hip_module_ctor: if the module
# was registered (non-null cached handle), unregister the fatbinary with the
# HIP runtime and clear the handle so a second invocation is a no-op.
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi   # arg0 = cached module handle
testq %rdi, %rdi
je .LBB7_2                             # never registered -> nothing to do
# %bb.1:
pushq %rax                             # re-align stack for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)     # mark as unregistered (idempotent)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3op1Pfm,@object # @_Z3op1Pfm
.section .rodata,"a",@progbits
.globl _Z3op1Pfm
.p2align 3, 0x0
_Z3op1Pfm:
.quad _Z18__device_stub__op1Pfm
.size _Z3op1Pfm, 8
.type _Z3op2Pfm,@object # @_Z3op2Pfm
.globl _Z3op2Pfm
.p2align 3, 0x0
_Z3op2Pfm:
.quad _Z18__device_stub__op2Pfm
.size _Z3op2Pfm, 8
.type _Z3op3Pfm,@object # @_Z3op3Pfm
.globl _Z3op3Pfm
.p2align 3, 0x0
_Z3op3Pfm:
.quad _Z18__device_stub__op3Pfm
.size _Z3op3Pfm, 8
.type _Z3op4Pim,@object # @_Z3op4Pim
.globl _Z3op4Pim
.p2align 3, 0x0
_Z3op4Pim:
.quad _Z18__device_stub__op4Pim
.size _Z3op4Pim, 8
.type _Z3op5Pim,@object # @_Z3op5Pim
.globl _Z3op5Pim
.p2align 3, 0x0
_Z3op5Pim:
.quad _Z18__device_stub__op5Pim
.size _Z3op5Pim, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z3op1Pfm"
.size .L__unnamed_1, 10
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z3op2Pfm"
.size .L__unnamed_2, 10
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z3op3Pfm"
.size .L__unnamed_3, 10
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "_Z3op4Pim"
.size .L__unnamed_4, 10
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "_Z3op5Pim"
.size .L__unnamed_5, 10
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__op1Pfm
.addrsig_sym _Z18__device_stub__op2Pfm
.addrsig_sym _Z18__device_stub__op3Pfm
.addrsig_sym _Z18__device_stub__op4Pim
.addrsig_sym _Z18__device_stub__op5Pim
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3op1Pfm
.addrsig_sym _Z3op2Pfm
.addrsig_sym _Z3op3Pfm
.addrsig_sym _Z3op4Pim
.addrsig_sym _Z3op5Pim
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#include "includes.h"
// In-place elementwise reciprocal: A[id] = 1.0f / A[id] for every id < size.
// Expects a 1-D grid of 1-D blocks launched with at least `size` threads total.
__global__ void divideKernel(float* A, int size){
// Flat global thread index: block offset plus lane within the block.
int id = blockIdx.x * blockDim.x + threadIdx.x;
// Bounds guard: the last block may extend past the end of A.
if(id < size){
A[id] = 1.0f / A[id]; // IEEE division: yields +/-Inf when A[id] == 0.0f
}
}
|
code for sm_80
Function : _Z12divideKernelPfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0080*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*0090*/ LDG.E R4, [R2.64] ; /* 0x0000000402047981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ BSSY B0, 0x180 ; /* 0x000000d000007945 */
/* 0x000fe20003800000 */
/*00b0*/ IADD3 R0, R4, 0x1800000, RZ ; /* 0x0180000004007810 */
/* 0x004fc80007ffe0ff */
/*00c0*/ LOP3.LUT R0, R0, 0x7f800000, RZ, 0xc0, !PT ; /* 0x7f80000000007812 */
/* 0x000fc800078ec0ff */
/*00d0*/ ISETP.GT.U32.AND P0, PT, R0, 0x1ffffff, PT ; /* 0x01ffffff0000780c */
/* 0x000fda0003f04070 */
/*00e0*/ @P0 BRA 0x130 ; /* 0x0000004000000947 */
/* 0x000fea0003800000 */
/*00f0*/ MOV R0, 0x110 ; /* 0x0000011000007802 */
/* 0x000fe40000000f00 */
/*0100*/ CALL.REL.NOINC 0x1a0 ; /* 0x0000009000007944 */
/* 0x000fea0003c00000 */
/*0110*/ IMAD.MOV.U32 R5, RZ, RZ, R6 ; /* 0x000000ffff057224 */
/* 0x000fe200078e0006 */
/*0120*/ BRA 0x170 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*0130*/ MUFU.RCP R5, R4 ; /* 0x0000000400057308 */
/* 0x000e240000001000 */
/*0140*/ FFMA R0, R4, R5, -1 ; /* 0xbf80000004007423 */
/* 0x001fc80000000005 */
/*0150*/ FADD.FTZ R0, -R0, -RZ ; /* 0x800000ff00007221 */
/* 0x000fc80000010100 */
/*0160*/ FFMA R5, R5, R0, R5 ; /* 0x0000000005057223 */
/* 0x000fe40000000005 */
/*0170*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0180*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0190*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01a0*/ IMAD.SHL.U32 R5, R4, 0x2, RZ ; /* 0x0000000204057824 */
/* 0x000fe200078e00ff */
/*01b0*/ BSSY B1, 0x4c0 ; /* 0x0000030000017945 */
/* 0x000fe80003800000 */
/*01c0*/ SHF.R.U32.HI R5, RZ, 0x18, R5 ; /* 0x00000018ff057819 */
/* 0x000fc80000011605 */
/*01d0*/ ISETP.NE.U32.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fda0003f05070 */
/*01e0*/ @P0 BRA 0x290 ; /* 0x000000a000000947 */
/* 0x000fea0003800000 */
/*01f0*/ IMAD.SHL.U32 R5, R4, 0x2, RZ ; /* 0x0000000204057824 */
/* 0x000fca00078e00ff */
/*0200*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fda0003f05270 */
/*0210*/ @P0 FFMA R6, R4, 1.84467440737095516160e+19, RZ ; /* 0x5f80000004060823 */
/* 0x000fe200000000ff */
/*0220*/ @!P0 MUFU.RCP R5, R4 ; /* 0x0000000400058308 */
/* 0x000ff00000001000 */
/*0230*/ @P0 MUFU.RCP R7, R6 ; /* 0x0000000600070308 */
/* 0x000e240000001000 */
/*0240*/ @P0 FFMA R8, R6, R7, -1 ; /* 0xbf80000006080423 */
/* 0x001fc80000000007 */
/*0250*/ @P0 FADD.FTZ R8, -R8, -RZ ; /* 0x800000ff08080221 */
/* 0x000fc80000010100 */
/*0260*/ @P0 FFMA R8, R7, R8, R7 ; /* 0x0000000807080223 */
/* 0x000fc80000000007 */
/*0270*/ @P0 FFMA R5, R8, 1.84467440737095516160e+19, RZ ; /* 0x5f80000008050823 */
/* 0x000fe200000000ff */
/*0280*/ BRA 0x4b0 ; /* 0x0000022000007947 */
/* 0x000fea0003800000 */
/*0290*/ IADD3 R6, R5, -0xfd, RZ ; /* 0xffffff0305067810 */
/* 0x000fc80007ffe0ff */
/*02a0*/ ISETP.GT.U32.AND P0, PT, R6, 0x1, PT ; /* 0x000000010600780c */
/* 0x000fda0003f04070 */
/*02b0*/ @P0 BRA 0x4a0 ; /* 0x000001e000000947 */
/* 0x000fea0003800000 */
/*02c0*/ LOP3.LUT R7, R4, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff04077812 */
/* 0x000fe200078ec0ff */
/*02d0*/ IMAD.MOV.U32 R11, RZ, RZ, 0x3 ; /* 0x00000003ff0b7424 */
/* 0x000fc600078e00ff */
/*02e0*/ LOP3.LUT R7, R7, 0x3f800000, RZ, 0xfc, !PT ; /* 0x3f80000007077812 */
/* 0x000fe400078efcff */
/*02f0*/ SHF.L.U32 R12, R11, R6, RZ ; /* 0x000000060b0c7219 */
/* 0x000fe400000006ff */
/*0300*/ MUFU.RCP R8, R7 ; /* 0x0000000700087308 */
/* 0x000e240000001000 */
/*0310*/ FFMA R9, R7, R8, -1 ; /* 0xbf80000007097423 */
/* 0x001fc80000000008 */
/*0320*/ FADD.FTZ R9, -R9, -RZ ; /* 0x800000ff09097221 */
/* 0x000fc80000010100 */
/*0330*/ FFMA.RM R10, R8.reuse, R9.reuse, R8.reuse ; /* 0x00000009080a7223 */
/* 0x1c0fe40000004008 */
/*0340*/ FFMA.RP R9, R8, R9, R8 ; /* 0x0000000908097223 */
/* 0x000fc60000008008 */
/*0350*/ LOP3.LUT R8, R10.reuse, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff0a087812 */
/* 0x040fe400078ec0ff */
/*0360*/ FSETP.NEU.FTZ.AND P0, PT, R10, R9, PT ; /* 0x000000090a00720b */
/* 0x000fe40003f1d000 */
/*0370*/ LOP3.LUT R9, R8, 0x800000, RZ, 0xfc, !PT ; /* 0x0080000008097812 */
/* 0x000fe400078efcff */
/*0380*/ SEL R8, RZ, 0xffffffff, !P0 ; /* 0xffffffffff087807 */
/* 0x000fe40004000000 */
/*0390*/ LOP3.LUT R7, R12, R9, RZ, 0xc0, !PT ; /* 0x000000090c077212 */
/* 0x000fc600078ec0ff */
/*03a0*/ IMAD.MOV R8, RZ, RZ, -R8 ; /* 0x000000ffff087224 */
/* 0x000fe200078e0a08 */
/*03b0*/ SHF.R.U32.HI R7, RZ, R6, R7 ; /* 0x00000006ff077219 */
/* 0x000fc80000011607 */
/*03c0*/ LOP3.LUT P1, RZ, R8, R6, R9, 0xf8, !PT ; /* 0x0000000608ff7212 */
/* 0x000fe4000782f809 */
/*03d0*/ LOP3.LUT P0, RZ, R7.reuse, 0x1, RZ, 0xc0, !PT ; /* 0x0000000107ff7812 */
/* 0x040fe4000780c0ff */
/*03e0*/ LOP3.LUT P2, RZ, R7, 0x2, RZ, 0xc0, !PT ; /* 0x0000000207ff7812 */
/* 0x000fc8000784c0ff */
/*03f0*/ PLOP3.LUT P0, PT, P0, P1, P2, 0xe0, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40000703c20 */
/*0400*/ LOP3.LUT P1, RZ, R4, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff04ff7812 */
/* 0x000fe4000782c0ff */
/*0410*/ SEL R6, RZ, 0x1, !P0 ; /* 0x00000001ff067807 */
/* 0x000fca0004000000 */
/*0420*/ IMAD.MOV R6, RZ, RZ, -R6 ; /* 0x000000ffff067224 */
/* 0x000fca00078e0a06 */
/*0430*/ ISETP.GE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe40003f06270 */
/*0440*/ IADD3 R6, R5, -0xfc, RZ ; /* 0xffffff0405067810 */
/* 0x000fc80007ffe0ff */
/*0450*/ SHF.R.U32.HI R5, RZ, R6, R9 ; /* 0x00000006ff057219 */
/* 0x000fce0000011609 */
/*0460*/ @!P0 IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105058810 */
/* 0x000fca0007ffe0ff */
/*0470*/ @!P1 IMAD.SHL.U32 R5, R5, 0x2, RZ ; /* 0x0000000205059824 */
/* 0x000fca00078e00ff */
/*0480*/ LOP3.LUT R5, R5, 0x80000000, R4, 0xf8, !PT ; /* 0x8000000005057812 */
/* 0x000fe200078ef804 */
/*0490*/ BRA 0x4b0 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*04a0*/ MUFU.RCP R5, R4 ; /* 0x0000000400057308 */
/* 0x0000640000001000 */
/*04b0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*04c0*/ IMAD.MOV.U32 R6, RZ, RZ, R5 ; /* 0x000000ffff067224 */
/* 0x002fe400078e0005 */
/*04d0*/ IMAD.MOV.U32 R4, RZ, RZ, R0 ; /* 0x000000ffff047224 */
/* 0x001fe400078e0000 */
/*04e0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x0 ; /* 0x00000000ff057424 */
/* 0x000fc800078e00ff */
/*04f0*/ RET.REL.NODEC R4 0x0 ; /* 0xfffffb0004007950 */
/* 0x000fea0003c3ffff */
/*0500*/ BRA 0x500; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#include "includes.h"
// In-place elementwise reciprocal: A[id] = 1.0f / A[id] for every id < size.
// Expects a 1-D grid of 1-D blocks launched with at least `size` threads total.
__global__ void divideKernel(float* A, int size){
// Flat global thread index: block offset plus lane within the block.
int id = blockIdx.x * blockDim.x + threadIdx.x;
// Bounds guard: the last block may extend past the end of A.
if(id < size){
A[id] = 1.0f / A[id]; // IEEE division: yields +/-Inf when A[id] == 0.0f
}
}
|
.file "tmpxft_0002cb80_00000000-6_divideKernel.cudafe1.cpp"
.text
#APP
#NO_APP
	# atexit callback (installed by _ZL24__sti____cudaRegisterAllv below):
	# unregisters the CUDA fat binary whose handle was cached in
	# _ZL20__cudaFatCubinHandle during module registration.
	.type	_ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
	.cfi_startproc
	endbr64
	subq	$8, %rsp		# realign stack to 16 bytes for the call
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi	# arg0 = fatbin handle
	call	__cudaUnregisterFatBinary@PLT
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2029:
	.size	_ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
	# Host-side launch stub for divideKernel(float*, int).
	# Spills the two kernel arguments to the stack, builds the argument-
	# pointer array, pops the launch configuration recorded by the
	# <<<...>>> call site, then forwards everything to cudaLaunchKernel.
	# Protected by a stack-smashing canary (%fs:40).
	.globl	_Z33__device_stub__Z12divideKernelPfiPfi
	.type	_Z33__device_stub__Z12divideKernelPfiPfi, @function
_Z33__device_stub__Z12divideKernelPfiPfi:
.LFB2051:
	.cfi_startproc
	endbr64
	subq	$120, %rsp
	.cfi_def_cfa_offset 128
	movq	%rdi, 8(%rsp)		# spill arg0: float* A
	movl	%esi, 4(%rsp)		# spill arg1: int size
	movq	%fs:40, %rax		# install stack-protector canary
	movq	%rax, 104(%rsp)
	xorl	%eax, %eax
	leaq	8(%rsp), %rax
	movq	%rax, 80(%rsp)		# args[0] = &A
	leaq	4(%rsp), %rax
	movq	%rax, 88(%rsp)		# args[1] = &size
	movl	$1, 32(%rsp)		# default-initialize grid/block dim3s to (1,1,1)
	movl	$1, 36(%rsp)
	movl	$1, 40(%rsp)
	movl	$1, 44(%rsp)
	movl	$1, 48(%rsp)
	movl	$1, 52(%rsp)
	leaq	24(%rsp), %rcx		# out: stream
	leaq	16(%rsp), %rdx		# out: shared-memory size
	leaq	44(%rsp), %rsi		# out: block dim
	leaq	32(%rsp), %rdi		# out: grid dim
	call	__cudaPopCallConfiguration@PLT
	testl	%eax, %eax
	je	.L7			# 0 => configuration popped OK, perform the launch
.L3:
	movq	104(%rsp), %rax		# verify stack-protector canary before return
	subq	%fs:40, %rax
	jne	.L8
	addq	$120, %rsp
	.cfi_remember_state
	.cfi_def_cfa_offset 8
	ret
.L7:
	.cfi_restore_state
	# NOTE: %rsp-relative offsets below are shifted by the two pushes.
	pushq	24(%rsp)		# stream argument for cudaLaunchKernel
	.cfi_def_cfa_offset 136
	pushq	24(%rsp)		# shared-memory-size argument
	.cfi_def_cfa_offset 144
	leaq	96(%rsp), %r9		# kernel-argument array
	movq	60(%rsp), %rcx		# block dim (packed x,y)
	movl	68(%rsp), %r8d		# block dim (z)
	movq	48(%rsp), %rsi		# grid dim (packed x,y)
	movl	56(%rsp), %edx		# grid dim (z)
	leaq	_Z12divideKernelPfi(%rip), %rdi	# host symbol identifying the kernel
	call	cudaLaunchKernel@PLT
	addq	$16, %rsp
	.cfi_def_cfa_offset 128
	jmp	.L3
.L8:
	call	__stack_chk_fail@PLT	# canary mismatch: abort
	.cfi_endproc
.LFE2051:
	.size	_Z33__device_stub__Z12divideKernelPfiPfi, .-_Z33__device_stub__Z12divideKernelPfiPfi
	# Public host symbol for the kernel.  Calling it (after the runtime has
	# recorded a <<<...>>> configuration) simply forwards to the launch stub.
	.globl	_Z12divideKernelPfi
	.type	_Z12divideKernelPfi, @function
_Z12divideKernelPfi:
.LFB2052:
	.cfi_startproc
	endbr64
	subq	$8, %rsp		# realign stack for the call
	.cfi_def_cfa_offset 16
	call	_Z33__device_stub__Z12divideKernelPfiPfi
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2052:
	.size	_Z12divideKernelPfi, .-_Z12divideKernelPfi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z12divideKernelPfi"
.text
	# Static initializer (run from .init_array before main): registers the
	# embedded fat binary and the divideKernel entry point with the CUDA
	# runtime, then installs the unregister callback via atexit.
	.type	_ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
	.cfi_startproc
	endbr64
	subq	$8, %rsp
	.cfi_def_cfa_offset 16
	leaq	_ZL15__fatDeviceText(%rip), %rdi	# fatbin wrapper descriptor
	call	__cudaRegisterFatBinary@PLT
	movq	%rax, %rdi
	movq	%rax, _ZL20__cudaFatCubinHandle(%rip)	# cache handle for unregister
	pushq	$0			# four trailing NULL pointer args
	.cfi_def_cfa_offset 24
	pushq	$0
	.cfi_def_cfa_offset 32
	pushq	$0
	.cfi_def_cfa_offset 40
	pushq	$0
	.cfi_def_cfa_offset 48
	movl	$0, %r9d
	movl	$-1, %r8d		# -1: presumably "no thread limit" — runtime-internal ABI
	leaq	.LC0(%rip), %rdx	# device-side name "_Z12divideKernelPfi"
	movq	%rdx, %rcx
	leaq	_Z12divideKernelPfi(%rip), %rsi	# host-side function used as handle
	call	__cudaRegisterFunction@PLT
	addq	$32, %rsp
	.cfi_def_cfa_offset 16
	movq	_ZL20__cudaFatCubinHandle(%rip), %rdi
	call	__cudaRegisterFatBinaryEnd@PLT
	leaq	_ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
	call	atexit@PLT		# unregister the module at program exit
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
	ret
	.cfi_endproc
.LFE2054:
	.size	_ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include "includes.h"
// In-place elementwise reciprocal: A[id] = 1.0f / A[id] for every id < size.
// Expects a 1-D grid of 1-D blocks launched with at least `size` threads total.
__global__ void divideKernel(float* A, int size){
// Flat global thread index: block offset plus lane within the block.
int id = blockIdx.x * blockDim.x + threadIdx.x;
// Bounds guard: the last block may extend past the end of A.
if(id < size){
A[id] = 1.0f / A[id]; // IEEE division: yields +/-Inf when A[id] == 0.0f
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
// In-place elementwise reciprocal: A[id] = 1.0f / A[id] for every id < size.
// Expects a 1-D grid of 1-D blocks launched with at least `size` threads total.
__global__ void divideKernel(float* A, int size){
// Flat global thread index: block offset plus lane within the block.
int id = blockIdx.x * blockDim.x + threadIdx.x;
// Bounds guard: the last block may extend past the end of A.
if(id < size){
A[id] = 1.0f / A[id]; // IEEE division: yields +/-Inf when A[id] == 0.0f
}
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
#include "includes.h"
// In-place elementwise reciprocal: A[id] = 1.0f / A[id] for every id < size.
// Expects a 1-D grid of 1-D blocks launched with at least `size` threads total.
__global__ void divideKernel(float* A, int size){
// Flat global thread index: block offset plus lane within the block.
int id = blockIdx.x * blockDim.x + threadIdx.x;
// Bounds guard: the last block may extend past the end of A.
if(id < size){
A[id] = 1.0f / A[id]; // IEEE division: yields +/-Inf when A[id] == 0.0f
}
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12divideKernelPfi
.globl _Z12divideKernelPfi
.p2align 8
.type _Z12divideKernelPfi,@function
_Z12divideKernelPfi:
; Computes id = workgroup_id_x * workgroup_size_x + workitem_id_x and, for
; lanes with id < size, stores A[id] = 1.0f / A[id] (IEEE-accurate divide).
	s_clause 0x1
	s_load_b32 s2, s[0:1], 0x1c            ; hidden group_size_x (kernarg +0x1c, 16-bit)
	s_load_b32 s3, s[0:1], 0x8             ; `size` argument (kernarg +0x8)
	s_waitcnt lgkmcnt(0)
	s_and_b32 s2, s2, 0xffff               ; mask group size down to its 16 valid bits
	s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]   ; v1 = wgid.x * gsize.x + tid.x
	s_mov_b32 s2, exec_lo
	v_cmpx_gt_i32_e64 s3, v1               ; disable lanes with id >= size (bounds guard)
	s_cbranch_execz .LBB0_2                ; whole wave out of range: skip body
	s_load_b64 s[0:1], s[0:1], 0x0         ; A base pointer (kernarg +0x0)
	v_ashrrev_i32_e32 v2, 31, v1           ; sign-extend id to 64 bits
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_lshlrev_b64 v[0:1], 2, v[1:2]        ; byte offset = id * sizeof(float)
	s_waitcnt lgkmcnt(0)
	v_add_co_u32 v0, vcc_lo, s0, v0        ; 64-bit address = base + offset
	s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
	v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
	global_load_b32 v2, v[0:1], off        ; v2 = A[id]
	s_waitcnt vmcnt(0)
; IEEE 1.0f / v2: div_scale + reciprocal estimate + Newton-Raphson refinement,
; finished with div_fmas/div_fixup for correct rounding and special values.
	v_div_scale_f32 v3, null, v2, v2, 1.0
	v_rcp_f32_e32 v4, v3
	s_waitcnt_depctr 0xfff
	v_fma_f32 v5, -v3, v4, 1.0
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_fmac_f32_e32 v4, v5, v4
	v_div_scale_f32 v5, vcc_lo, 1.0, v2, 1.0
	v_mul_f32_e32 v6, v5, v4
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_fma_f32 v7, -v3, v6, v5
	v_fmac_f32_e32 v6, v7, v4
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_fma_f32 v3, -v3, v6, v5
	v_div_fmas_f32 v3, v3, v4, v6
	s_delay_alu instid0(VALU_DEP_1)
	v_div_fixup_f32 v2, v3, v2, 1.0
	global_store_b32 v[0:1], v2, off       ; A[id] = 1.0f / A[id]
.LBB0_2:
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)   ; release VGPRs before ending the wave
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12divideKernelPfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12divideKernelPfi, .Lfunc_end0-_Z12divideKernelPfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12divideKernelPfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12divideKernelPfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
#include "includes.h"
// In-place elementwise reciprocal: A[id] = 1.0f / A[id] for every id < size.
// Expects a 1-D grid of 1-D blocks launched with at least `size` threads total.
__global__ void divideKernel(float* A, int size){
// Flat global thread index: block offset plus lane within the block.
int id = blockIdx.x * blockDim.x + threadIdx.x;
// Bounds guard: the last block may extend past the end of A.
if(id < size){
A[id] = 1.0f / A[id]; // IEEE division: yields +/-Inf when A[id] == 0.0f
}
}
|
.text
.file "divideKernel.hip"
	# HIP host-side launch stub for divideKernel(float*, int).
	# Spills the kernel arguments, builds the argument-pointer array, pops
	# the launch configuration recorded by the <<<...>>> call site, and
	# forwards everything to hipLaunchKernel.
	.globl	_Z27__device_stub__divideKernelPfi # -- Begin function _Z27__device_stub__divideKernelPfi
	.p2align	4, 0x90
	.type	_Z27__device_stub__divideKernelPfi,@function
_Z27__device_stub__divideKernelPfi:     # @_Z27__device_stub__divideKernelPfi
	.cfi_startproc
# %bb.0:
	subq	$88, %rsp
	.cfi_def_cfa_offset 96
	movq	%rdi, 56(%rsp)		# spill arg0: float* A
	movl	%esi, 4(%rsp)		# spill arg1: int size
	leaq	56(%rsp), %rax
	movq	%rax, 64(%rsp)		# args[0] = &A
	leaq	4(%rsp), %rax
	movq	%rax, 72(%rsp)		# args[1] = &size
	leaq	40(%rsp), %rdi		# out: grid dim
	leaq	24(%rsp), %rsi		# out: block dim
	leaq	16(%rsp), %rdx		# out: shared-memory size
	leaq	8(%rsp), %rcx		# out: stream
	callq	__hipPopCallConfiguration
	movq	40(%rsp), %rsi		# grid dim (packed x,y)
	movl	48(%rsp), %edx		# grid dim (z)
	movq	24(%rsp), %rcx		# block dim (packed x,y)
	movl	32(%rsp), %r8d		# block dim (z)
	leaq	64(%rsp), %r9		# kernel-argument array
	movl	$_Z12divideKernelPfi, %edi	# host symbol identifying the kernel
	pushq	8(%rsp)			# stream argument (offsets shift after each push)
	.cfi_adjust_cfa_offset 8
	pushq	24(%rsp)		# shared-memory-size argument
	.cfi_adjust_cfa_offset 8
	callq	hipLaunchKernel
	addq	$104, %rsp
	.cfi_adjust_cfa_offset -104
	retq
.Lfunc_end0:
	.size	_Z27__device_stub__divideKernelPfi, .Lfunc_end0-_Z27__device_stub__divideKernelPfi
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_ctor
	.type	__hip_module_ctor,@function
__hip_module_ctor:                      # @__hip_module_ctor
	# Module constructor (run from .init_array): registers the embedded HIP
	# fat binary exactly once (handle cached in __hip_gpubin_handle),
	# registers divideKernel with the runtime, and installs the destructor
	# via atexit.
	.cfi_startproc
# %bb.0:
	subq	$40, %rsp
	.cfi_def_cfa_offset 48
	cmpq	$0, __hip_gpubin_handle(%rip)	# already registered?
	jne	.LBB1_2
# %bb.1:
	movl	$__hip_fatbin_wrapper, %edi
	callq	__hipRegisterFatBinary
	movq	%rax, __hip_gpubin_handle(%rip)	# cache handle for reuse/unregister
.LBB1_2:
	movq	__hip_gpubin_handle(%rip), %rdi
	xorps	%xmm0, %xmm0
	movups	%xmm0, 16(%rsp)		# zero the four trailing stack arguments
	movups	%xmm0, (%rsp)
	movl	$_Z12divideKernelPfi, %esi	# host-side function used as handle
	movl	$.L__unnamed_1, %edx		# device-side name "_Z12divideKernelPfi"
	movl	$.L__unnamed_1, %ecx
	movl	$-1, %r8d			# -1: presumably "no thread limit" — runtime-internal ABI
	xorl	%r9d, %r9d
	callq	__hipRegisterFunction
	movl	$__hip_module_dtor, %edi	# unregister the module at program exit
	addq	$40, %rsp
	.cfi_def_cfa_offset 8
	jmp	atexit                          # TAILCALL
.Lfunc_end1:
	.size	__hip_module_ctor, .Lfunc_end1-__hip_module_ctor
	.cfi_endproc
                                        # -- End function
	.p2align	4, 0x90                         # -- Begin function __hip_module_dtor
	.type	__hip_module_dtor,@function
__hip_module_dtor:                      # @__hip_module_dtor
	# atexit callback: unregisters the HIP fat binary if one was registered
	# and clears the cached handle so a repeated call becomes a no-op.
	.cfi_startproc
# %bb.0:
	movq	__hip_gpubin_handle(%rip), %rdi
	testq	%rdi, %rdi
	je	.LBB2_2			# nothing registered: just return
# %bb.1:
	pushq	%rax			# realign stack for the call
	.cfi_def_cfa_offset 16
	callq	__hipUnregisterFatBinary
	movq	$0, __hip_gpubin_handle(%rip)	# mark as unregistered
	addq	$8, %rsp
	.cfi_def_cfa_offset 8
.LBB2_2:
	retq
.Lfunc_end2:
	.size	__hip_module_dtor, .Lfunc_end2-__hip_module_dtor
	.cfi_endproc
                                        # -- End function
.type _Z12divideKernelPfi,@object # @_Z12divideKernelPfi
.section .rodata,"a",@progbits
.globl _Z12divideKernelPfi
.p2align 3, 0x0
_Z12divideKernelPfi:
.quad _Z27__device_stub__divideKernelPfi
.size _Z12divideKernelPfi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z12divideKernelPfi"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__divideKernelPfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12divideKernelPfi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : _Z12divideKernelPfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x168], PT ; /* 0x00005a0002007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0080*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fca00078e0203 */
/*0090*/ LDG.E R4, [R2.64] ; /* 0x0000000402047981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ BSSY B0, 0x180 ; /* 0x000000d000007945 */
/* 0x000fe20003800000 */
/*00b0*/ IADD3 R0, R4, 0x1800000, RZ ; /* 0x0180000004007810 */
/* 0x004fc80007ffe0ff */
/*00c0*/ LOP3.LUT R0, R0, 0x7f800000, RZ, 0xc0, !PT ; /* 0x7f80000000007812 */
/* 0x000fc800078ec0ff */
/*00d0*/ ISETP.GT.U32.AND P0, PT, R0, 0x1ffffff, PT ; /* 0x01ffffff0000780c */
/* 0x000fda0003f04070 */
/*00e0*/ @P0 BRA 0x130 ; /* 0x0000004000000947 */
/* 0x000fea0003800000 */
/*00f0*/ MOV R0, 0x110 ; /* 0x0000011000007802 */
/* 0x000fe40000000f00 */
/*0100*/ CALL.REL.NOINC 0x1a0 ; /* 0x0000009000007944 */
/* 0x000fea0003c00000 */
/*0110*/ IMAD.MOV.U32 R5, RZ, RZ, R6 ; /* 0x000000ffff057224 */
/* 0x000fe200078e0006 */
/*0120*/ BRA 0x170 ; /* 0x0000004000007947 */
/* 0x000fea0003800000 */
/*0130*/ MUFU.RCP R5, R4 ; /* 0x0000000400057308 */
/* 0x000e240000001000 */
/*0140*/ FFMA R0, R4, R5, -1 ; /* 0xbf80000004007423 */
/* 0x001fc80000000005 */
/*0150*/ FADD.FTZ R0, -R0, -RZ ; /* 0x800000ff00007221 */
/* 0x000fc80000010100 */
/*0160*/ FFMA R5, R5, R0, R5 ; /* 0x0000000005057223 */
/* 0x000fe40000000005 */
/*0170*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0180*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0190*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01a0*/ IMAD.SHL.U32 R5, R4, 0x2, RZ ; /* 0x0000000204057824 */
/* 0x000fe200078e00ff */
/*01b0*/ BSSY B1, 0x4c0 ; /* 0x0000030000017945 */
/* 0x000fe80003800000 */
/*01c0*/ SHF.R.U32.HI R5, RZ, 0x18, R5 ; /* 0x00000018ff057819 */
/* 0x000fc80000011605 */
/*01d0*/ ISETP.NE.U32.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fda0003f05070 */
/*01e0*/ @P0 BRA 0x290 ; /* 0x000000a000000947 */
/* 0x000fea0003800000 */
/*01f0*/ IMAD.SHL.U32 R5, R4, 0x2, RZ ; /* 0x0000000204057824 */
/* 0x000fca00078e00ff */
/*0200*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fda0003f05270 */
/*0210*/ @P0 FFMA R6, R4, 1.84467440737095516160e+19, RZ ; /* 0x5f80000004060823 */
/* 0x000fe200000000ff */
/*0220*/ @!P0 MUFU.RCP R5, R4 ; /* 0x0000000400058308 */
/* 0x000ff00000001000 */
/*0230*/ @P0 MUFU.RCP R7, R6 ; /* 0x0000000600070308 */
/* 0x000e240000001000 */
/*0240*/ @P0 FFMA R8, R6, R7, -1 ; /* 0xbf80000006080423 */
/* 0x001fc80000000007 */
/*0250*/ @P0 FADD.FTZ R8, -R8, -RZ ; /* 0x800000ff08080221 */
/* 0x000fc80000010100 */
/*0260*/ @P0 FFMA R8, R7, R8, R7 ; /* 0x0000000807080223 */
/* 0x000fc80000000007 */
/*0270*/ @P0 FFMA R5, R8, 1.84467440737095516160e+19, RZ ; /* 0x5f80000008050823 */
/* 0x000fe200000000ff */
/*0280*/ BRA 0x4b0 ; /* 0x0000022000007947 */
/* 0x000fea0003800000 */
/*0290*/ IADD3 R6, R5, -0xfd, RZ ; /* 0xffffff0305067810 */
/* 0x000fc80007ffe0ff */
/*02a0*/ ISETP.GT.U32.AND P0, PT, R6, 0x1, PT ; /* 0x000000010600780c */
/* 0x000fda0003f04070 */
/*02b0*/ @P0 BRA 0x4a0 ; /* 0x000001e000000947 */
/* 0x000fea0003800000 */
/*02c0*/ LOP3.LUT R7, R4, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff04077812 */
/* 0x000fe200078ec0ff */
/*02d0*/ IMAD.MOV.U32 R11, RZ, RZ, 0x3 ; /* 0x00000003ff0b7424 */
/* 0x000fc600078e00ff */
/*02e0*/ LOP3.LUT R7, R7, 0x3f800000, RZ, 0xfc, !PT ; /* 0x3f80000007077812 */
/* 0x000fe400078efcff */
/*02f0*/ SHF.L.U32 R12, R11, R6, RZ ; /* 0x000000060b0c7219 */
/* 0x000fe400000006ff */
/*0300*/ MUFU.RCP R8, R7 ; /* 0x0000000700087308 */
/* 0x000e240000001000 */
/*0310*/ FFMA R9, R7, R8, -1 ; /* 0xbf80000007097423 */
/* 0x001fc80000000008 */
/*0320*/ FADD.FTZ R9, -R9, -RZ ; /* 0x800000ff09097221 */
/* 0x000fc80000010100 */
/*0330*/ FFMA.RM R10, R8.reuse, R9.reuse, R8.reuse ; /* 0x00000009080a7223 */
/* 0x1c0fe40000004008 */
/*0340*/ FFMA.RP R9, R8, R9, R8 ; /* 0x0000000908097223 */
/* 0x000fc60000008008 */
/*0350*/ LOP3.LUT R8, R10.reuse, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff0a087812 */
/* 0x040fe400078ec0ff */
/*0360*/ FSETP.NEU.FTZ.AND P0, PT, R10, R9, PT ; /* 0x000000090a00720b */
/* 0x000fe40003f1d000 */
/*0370*/ LOP3.LUT R9, R8, 0x800000, RZ, 0xfc, !PT ; /* 0x0080000008097812 */
/* 0x000fe400078efcff */
/*0380*/ SEL R8, RZ, 0xffffffff, !P0 ; /* 0xffffffffff087807 */
/* 0x000fe40004000000 */
/*0390*/ LOP3.LUT R7, R12, R9, RZ, 0xc0, !PT ; /* 0x000000090c077212 */
/* 0x000fc600078ec0ff */
/*03a0*/ IMAD.MOV R8, RZ, RZ, -R8 ; /* 0x000000ffff087224 */
/* 0x000fe200078e0a08 */
/*03b0*/ SHF.R.U32.HI R7, RZ, R6, R7 ; /* 0x00000006ff077219 */
/* 0x000fc80000011607 */
/*03c0*/ LOP3.LUT P1, RZ, R8, R6, R9, 0xf8, !PT ; /* 0x0000000608ff7212 */
/* 0x000fe4000782f809 */
/*03d0*/ LOP3.LUT P0, RZ, R7.reuse, 0x1, RZ, 0xc0, !PT ; /* 0x0000000107ff7812 */
/* 0x040fe4000780c0ff */
/*03e0*/ LOP3.LUT P2, RZ, R7, 0x2, RZ, 0xc0, !PT ; /* 0x0000000207ff7812 */
/* 0x000fc8000784c0ff */
/*03f0*/ PLOP3.LUT P0, PT, P0, P1, P2, 0xe0, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40000703c20 */
/*0400*/ LOP3.LUT P1, RZ, R4, 0x7fffff, RZ, 0xc0, !PT ; /* 0x007fffff04ff7812 */
/* 0x000fe4000782c0ff */
/*0410*/ SEL R6, RZ, 0x1, !P0 ; /* 0x00000001ff067807 */
/* 0x000fca0004000000 */
/*0420*/ IMAD.MOV R6, RZ, RZ, -R6 ; /* 0x000000ffff067224 */
/* 0x000fca00078e0a06 */
/*0430*/ ISETP.GE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe40003f06270 */
/*0440*/ IADD3 R6, R5, -0xfc, RZ ; /* 0xffffff0405067810 */
/* 0x000fc80007ffe0ff */
/*0450*/ SHF.R.U32.HI R5, RZ, R6, R9 ; /* 0x00000006ff057219 */
/* 0x000fce0000011609 */
/*0460*/ @!P0 IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105058810 */
/* 0x000fca0007ffe0ff */
/*0470*/ @!P1 IMAD.SHL.U32 R5, R5, 0x2, RZ ; /* 0x0000000205059824 */
/* 0x000fca00078e00ff */
/*0480*/ LOP3.LUT R5, R5, 0x80000000, R4, 0xf8, !PT ; /* 0x8000000005057812 */
/* 0x000fe200078ef804 */
/*0490*/ BRA 0x4b0 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*04a0*/ MUFU.RCP R5, R4 ; /* 0x0000000400057308 */
/* 0x0000640000001000 */
/*04b0*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*04c0*/ IMAD.MOV.U32 R6, RZ, RZ, R5 ; /* 0x000000ffff067224 */
/* 0x002fe400078e0005 */
/*04d0*/ IMAD.MOV.U32 R4, RZ, RZ, R0 ; /* 0x000000ffff047224 */
/* 0x001fe400078e0000 */
/*04e0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x0 ; /* 0x00000000ff057424 */
/* 0x000fc800078e00ff */
/*04f0*/ RET.REL.NODEC R4 0x0 ; /* 0xfffffb0004007950 */
/* 0x000fea0003c3ffff */
/*0500*/ BRA 0x500; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0510*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0520*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0530*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0540*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0550*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0560*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12divideKernelPfi
.globl _Z12divideKernelPfi
.p2align 8
.type _Z12divideKernelPfi,@function
_Z12divideKernelPfi:
; Computes id = workgroup_id_x * workgroup_size_x + workitem_id_x and, for
; lanes with id < size, stores A[id] = 1.0f / A[id] (IEEE-accurate divide).
	s_clause 0x1
	s_load_b32 s2, s[0:1], 0x1c            ; hidden group_size_x (kernarg +0x1c, 16-bit)
	s_load_b32 s3, s[0:1], 0x8             ; `size` argument (kernarg +0x8)
	s_waitcnt lgkmcnt(0)
	s_and_b32 s2, s2, 0xffff               ; mask group size down to its 16 valid bits
	s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]   ; v1 = wgid.x * gsize.x + tid.x
	s_mov_b32 s2, exec_lo
	v_cmpx_gt_i32_e64 s3, v1               ; disable lanes with id >= size (bounds guard)
	s_cbranch_execz .LBB0_2                ; whole wave out of range: skip body
	s_load_b64 s[0:1], s[0:1], 0x0         ; A base pointer (kernarg +0x0)
	v_ashrrev_i32_e32 v2, 31, v1           ; sign-extend id to 64 bits
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_lshlrev_b64 v[0:1], 2, v[1:2]        ; byte offset = id * sizeof(float)
	s_waitcnt lgkmcnt(0)
	v_add_co_u32 v0, vcc_lo, s0, v0        ; 64-bit address = base + offset
	s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
	v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
	global_load_b32 v2, v[0:1], off        ; v2 = A[id]
	s_waitcnt vmcnt(0)
; IEEE 1.0f / v2: div_scale + reciprocal estimate + Newton-Raphson refinement,
; finished with div_fmas/div_fixup for correct rounding and special values.
	v_div_scale_f32 v3, null, v2, v2, 1.0
	v_rcp_f32_e32 v4, v3
	s_waitcnt_depctr 0xfff
	v_fma_f32 v5, -v3, v4, 1.0
	s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
	v_fmac_f32_e32 v4, v5, v4
	v_div_scale_f32 v5, vcc_lo, 1.0, v2, 1.0
	v_mul_f32_e32 v6, v5, v4
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_fma_f32 v7, -v3, v6, v5
	v_fmac_f32_e32 v6, v7, v4
	s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
	v_fma_f32 v3, -v3, v6, v5
	v_div_fmas_f32 v3, v3, v4, v6
	s_delay_alu instid0(VALU_DEP_1)
	v_div_fixup_f32 v2, v3, v2, 1.0
	global_store_b32 v[0:1], v2, off       ; A[id] = 1.0f / A[id]
.LBB0_2:
	s_nop 0
	s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)   ; release VGPRs before ending the wave
	s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12divideKernelPfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12divideKernelPfi, .Lfunc_end0-_Z12divideKernelPfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12divideKernelPfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12divideKernelPfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_0002cb80_00000000-6_divideKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z12divideKernelPfiPfi
.type _Z33__device_stub__Z12divideKernelPfiPfi, @function
_Z33__device_stub__Z12divideKernelPfiPfi:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z12divideKernelPfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z33__device_stub__Z12divideKernelPfiPfi, .-_Z33__device_stub__Z12divideKernelPfiPfi
.globl _Z12divideKernelPfi
.type _Z12divideKernelPfi, @function
_Z12divideKernelPfi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z12divideKernelPfiPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z12divideKernelPfi, .-_Z12divideKernelPfi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z12divideKernelPfi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z12divideKernelPfi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "divideKernel.hip"
.globl _Z27__device_stub__divideKernelPfi # -- Begin function _Z27__device_stub__divideKernelPfi
.p2align 4, 0x90
.type _Z27__device_stub__divideKernelPfi,@function
_Z27__device_stub__divideKernelPfi: # @_Z27__device_stub__divideKernelPfi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z12divideKernelPfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z27__device_stub__divideKernelPfi, .Lfunc_end0-_Z27__device_stub__divideKernelPfi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12divideKernelPfi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12divideKernelPfi,@object # @_Z12divideKernelPfi
.section .rodata,"a",@progbits
.globl _Z12divideKernelPfi
.p2align 3, 0x0
_Z12divideKernelPfi:
.quad _Z27__device_stub__divideKernelPfi
.size _Z12divideKernelPfi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z12divideKernelPfi"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__divideKernelPfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12divideKernelPfi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include <iostream>
#include <vector>
#include <ctime>
#include <cstdlib>
#include <algorithm>
#include <sstream>
#include <fstream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
using namespace std;
struct element
{
int key;
float value;
double nana;
__host__ __device__
bool operator<(const element other) const
{
return nana < other.nana;
}
};
//Fill a vector with random numbers in the range [lower, upper]
void rnd_fill(thrust::host_vector<element> &V, const double lower, const double upper, int seed) {
//Create a unique seed for the random number generator
srand(time(NULL));
size_t elem = V.size();
for( size_t i = 0; i < elem; ++i){
V[i].nana = (double) rand() / (double) RAND_MAX;
}
}
template <typename T>
inline void str2num(string str, T& num){
if ( ! (istringstream(str) >> num) ) num = 0;
}
int main( int argc, char** argv ){
int numelem;
string param = argv[1]; str2num( param, numelem);
int numexp;
param = argv[2]; str2num( param, numexp);
ofstream result("experiment.data");
vector<time_t> timer(6);
timer[0] = 0;
timer[5] = 0;
for(int i = 0; i < numelem; ++i){
int size = pow(2, i);
cout << "# elems: " << size << endl;
for( int j = 0; j < numexp; ++j){
//Initialization
thrust::host_vector<element> h_V;
thrust::device_vector<element> g_V;
h_V.resize( size );
int seed = time(0);
rnd_fill( h_V, 0.1, 1.0, seed);
g_V = h_V;
vector<element> c_V;
c_V.resize( size );
for( int i = 0; i < h_V.size(); ++i){
c_V[i].nana = h_V[i].nana;
}
timer[1] = time(0);
thrust::sort( g_V.begin(), g_V.end() );
h_V = g_V;
for( int k = 0; k < g_V.size(); k++){
cout << h_V[k].nana << "\t";
}
timer[2] = time(0);
// result << size << "," << difftime(timer[2], timer[1]) << endl;
timer[0] += difftime(timer[2], timer[1]);
timer[3] = time(0);
// sort( c_V.begin(), c_V.end());
timer[4] = time(0);
// result << size << "," << difftime(timer[4], timer[3]) << endl;
timer[5] += difftime(timer[4], timer[3]);
}
}
cout << "GPU SORTING\n";
cout <<"Tiempo total: " << timer[0] << endl;
cout << "CPU SORTING\n";
cout <<"Tiempo total: " << timer[5] << endl;
result.close();
return 0;
}
|
#include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
#include <ctime>
#include <cstdlib>
#include <algorithm>
#include <sstream>
#include <fstream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
using namespace std;
struct element
{
int key;
float value;
double nana;
__host__ __device__
bool operator<(const element other) const
{
return nana < other.nana;
}
};
//Fill a vector with random numbers in the range [lower, upper]
void rnd_fill(thrust::host_vector<element> &V, const double lower, const double upper, int seed) {
//Create a unique seed for the random number generator
srand(time(NULL));
size_t elem = V.size();
for( size_t i = 0; i < elem; ++i){
V[i].nana = (double) rand() / (double) RAND_MAX;
}
}
template <typename T>
inline void str2num(string str, T& num){
if ( ! (istringstream(str) >> num) ) num = 0;
}
int main( int argc, char** argv ){
int numelem;
string param = argv[1]; str2num( param, numelem);
int numexp;
param = argv[2]; str2num( param, numexp);
ofstream result("experiment.data");
vector<time_t> timer(6);
timer[0] = 0;
timer[5] = 0;
for(int i = 0; i < numelem; ++i){
int size = pow(2, i);
cout << "# elems: " << size << endl;
for( int j = 0; j < numexp; ++j){
//Initialization
thrust::host_vector<element> h_V;
thrust::device_vector<element> g_V;
h_V.resize( size );
int seed = time(0);
rnd_fill( h_V, 0.1, 1.0, seed);
g_V = h_V;
vector<element> c_V;
c_V.resize( size );
for( int i = 0; i < h_V.size(); ++i){
c_V[i].nana = h_V[i].nana;
}
timer[1] = time(0);
thrust::sort( g_V.begin(), g_V.end() );
h_V = g_V;
for( int k = 0; k < g_V.size(); k++){
cout << h_V[k].nana << "\t";
}
timer[2] = time(0);
// result << size << "," << difftime(timer[2], timer[1]) << endl;
timer[0] += difftime(timer[2], timer[1]);
timer[3] = time(0);
// sort( c_V.begin(), c_V.end());
timer[4] = time(0);
// result << size << "," << difftime(timer[4], timer[3]) << endl;
timer[5] += difftime(timer[4], timer[3]);
}
}
cout << "GPU SORTING\n";
cout <<"Tiempo total: " << timer[0] << endl;
cout << "CPU SORTING\n";
cout <<"Tiempo total: " << timer[5] << endl;
result.close();
return 0;
}
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#if __linux__ && defined(__INTEL_COMPILER)
#define __sync_fetch_and_add(ptr,addend) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(ptr)), addend)
#endif
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <time.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "utility.h"
#include "csv.hpp"
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<thrust/sort.h>
using namespace std;
std::vector<string> sv;
std::vector<string> sourceIP;
std::vector<string> destinationIP;
std::vector<string> timestamp;
std::vector<string> IPstring_src;
std::vector<string> IPstring_dst;
std::vector<std::string> split_string_2(std::string str, char del) {
int first = 0;
int last = str.find_first_of(del);
std::vector<std::string> result;
while (first < str.size()) {
std::string subStr(str, first, last - first);
result.push_back(subStr);
first = last + 1;
last = str.find_first_of(del, first);
if (last == std::string::npos) {
last = str.size();
}
}
return result;
}
int main( int argc, char* argv[] ) {
int counter = 0;
struct timespec startTime, endTime, sleepTime;
int N = atoi(argv[2]);
int netmask;
std::map <int,int> found_flag;
const string session_file = std::string(argv[1]);
vector<vector<string>> session_data;
try {
Csv objCsv(session_file);
if (!objCsv.getCsv(session_data)) {
cout << "read ERROR" << endl;
return 1;
}
}
catch (...) {
cout << "EXCEPTION (session)" << endl;
return 1;
}
thrust::host_vector<unsigned long> V1(session_data.size());
thrust::host_vector<unsigned long> V2(session_data.size());
thrust::host_vector<unsigned long> V3(session_data.size());
clock_gettime(CLOCK_REALTIME, &startTime);
sleepTime.tv_sec = 0;
sleepTime.tv_nsec = 123;
for (unsigned int row2 = 0; row2 < session_data.size(); row2++) {
vector<string> rec2 = session_data[row2];
std::string srcIP = rec2[4];
for(size_t c = srcIP.find_first_of("\""); c != string::npos; c = c = srcIP.find_first_of("\"")){
srcIP.erase(c,1);
}
char del2 ='.';
std::string sessionIPstring;
for (const auto subStr : split_string_2(srcIP, del2)) {
unsigned long ipaddr_src;
ipaddr_src = stoul(subStr.c_str());
std::bitset<8> trans = std::bitset<8>(ipaddr_src);
std::string trans_string = trans.to_string();
sessionIPstring = sessionIPstring + trans_string;
}
std::bitset<32> bit_sessionIP(sessionIPstring);
std::bitset<32> bit_sessionIP_cuda(sessionIPstring);
// cout << srcIP << "," << bit_sessionIP.to_ulong() << endl;
/* mask 1 */
int netmask=24;
std::bitset<32> trans2(0xFFFFFFFF);
trans2 <<= 32-netmask;
// trans2 >>= netmask;
bit_sessionIP &= trans2;
std::string rev_string = bit_sessionIP.to_string();
string bs1 = rev_string.substr(0,8);
int bi1 = bitset<8>(bs1).to_ulong();
string bs2 = rev_string.substr(8,8);
int bi2 = bitset<8>(bs2).to_ulong();
string bs3 = rev_string.substr(16,8);
int bi3 = bitset<8>(bs3).to_ulong();
string bs4 = rev_string.substr(24,8);
int bi4 = bitset<8>(bs4).to_ulong();
string revIP = to_string(bi1) + "." + to_string(bi2) + "." + to_string(bi3) + "." + to_string(bi4);
if(row2 < 10)
{
cout << srcIP << "," << bit_sessionIP_cuda.to_ulong() << "," << bit_sessionIP_cuda << "," << trans2 << "," << revIP << endl;
}
}
clock_gettime(CLOCK_REALTIME, &endTime);
if (endTime.tv_nsec < startTime.tv_nsec) {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec - 1, endTime.tv_nsec + 1000000000 - startTime.tv_nsec);
} else {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec,endTime.tv_nsec - startTime.tv_nsec);
}
printf(" sec\n");
clock_gettime(CLOCK_REALTIME, &startTime);
sleepTime.tv_sec = 0;
sleepTime.tv_nsec = 123;
for (unsigned int row2 = 0; row2 < session_data.size(); row2++) {
vector<string> rec2 = session_data[row2];
std::string srcIP = rec2[4];
for(size_t c = srcIP.find_first_of("\""); c != string::npos; c = c = srcIP.find_first_of("\"")){
srcIP.erase(c,1);
}
char del2 ='.';
std::string sessionIPstring;
for (const auto subStr : split_string_2(srcIP, del2)) {
unsigned long ipaddr_src;
ipaddr_src = stoul(subStr.c_str());
std::bitset<8> trans = std::bitset<8>(ipaddr_src);
std::string trans_string = trans.to_string();
sessionIPstring = sessionIPstring + trans_string;
}
std::bitset<32> bit_sessionIP_cuda(sessionIPstring);
V1[row2] = bit_sessionIP_cuda.to_ulong();
}
netmask=24;
std::bitset<32> trans2(0xFFFFFFFF);
// trans2 <<= netmask;
trans2 <<= 32-netmask;
thrust::fill(V2.begin(), V2.end(), trans2.to_ulong());
thrust::device_vector<unsigned long> DV1 = V1;
thrust::device_vector<unsigned long> DV2 = V2;
thrust::device_vector<unsigned long> DV3 = V3;
thrust::transform(DV1.begin(), DV1.end(), DV2.begin(), DV3.begin(), thrust::bit_and<unsigned long>());
for(int i = 0; i < 10; i++)
{
std::bitset<32> bs1(DV1[i]);
std::bitset<32> cuda_sessionIP(DV3[i]);
std::string rev_string_2 = cuda_sessionIP.to_string();
string rev1 = rev_string_2.substr(0,8);
int bi1 = bitset<8>(rev1).to_ulong();
string rev2 = rev_string_2.substr(8,8);
int bi2 = bitset<8>(rev2).to_ulong();
string rev3 = rev_string_2.substr(16,8);
int bi3 = bitset<8>(rev3).to_ulong();
string rev4 = rev_string_2.substr(24,8);
int bi4 = bitset<8>(rev4).to_ulong();
string revIP_2 = to_string(bi1) + "." + to_string(bi2) + "." + to_string(bi3) + "." + to_string(bi4);
std::cout << DV1[i] << "," << bs1 << "," << DV2[i] << "," << trans2 << "," << DV3[i] << "," << revIP_2 << std::endl;
}
clock_gettime(CLOCK_REALTIME, &endTime);
if (endTime.tv_nsec < startTime.tv_nsec) {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec - 1, endTime.tv_nsec + 1000000000 - startTime.tv_nsec);
} else {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec,endTime.tv_nsec - startTime.tv_nsec);
}
printf(" sec\n");
}
|
code for sm_80
Function : _ZN3cub17CUB_200700_800_NS6detail8for_each13static_kernelINS2_12policy_hub_t12policy_350_tElN6thrust20THRUST_200700_800_NS8cuda_cub11__transform18binary_transform_fINS7_6detail15normal_iteratorINS7_10device_ptrImEEEESF_SF_NS9_14no_stencil_tagENS7_7bit_andImEENS9_21always_true_predicateEEEEEvT0_T1_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e620000002100 */
/*0040*/ IMAD.WIDE.U32 R2, R2, 0x200, RZ ; /* 0x0000020002027825 */
/* 0x001fca00078e00ff */
/*0050*/ IADD3 R0, P1, R2.reuse, R9, RZ ; /* 0x0000000902007210 */
/* 0x042fe40007f3e0ff */
/*0060*/ IADD3 R8, P0, -R2, c[0x0][0x160], RZ ; /* 0x0000580002087a10 */
/* 0x000fc60007f1e1ff */
/*0070*/ IMAD.X R5, RZ, RZ, R3, P1 ; /* 0x000000ffff057224 */
/* 0x000fe200008e0603 */
/*0080*/ IADD3.X R3, ~R3, c[0x0][0x164], RZ, P0, !PT ; /* 0x0000590003037a10 */
/* 0x000fe200007fe5ff */
/*0090*/ IMAD.SHL.U32 R2, R0, 0x8, RZ ; /* 0x0000000800027824 */
/* 0x000fe200078e00ff */
/*00a0*/ ISETP.GT.U32.AND P0, PT, R8, 0x1ff, PT ; /* 0x000001ff0800780c */
/* 0x000fe40003f04070 */
/*00b0*/ SHF.L.U64.HI R0, R0, 0x3, R5 ; /* 0x0000000300007819 */
/* 0x000fe40000010205 */
/*00c0*/ ISETP.GT.AND.EX P0, PT, R3, RZ, PT, P0 ; /* 0x000000ff0300720c */
/* 0x000fe40003f04300 */
/*00d0*/ IADD3 R4, P1, R2.reuse, c[0x0][0x168], RZ ; /* 0x00005a0002047a10 */
/* 0x040fe40007f3e0ff */
/*00e0*/ IADD3 R6, P2, R2, c[0x0][0x170], RZ ; /* 0x00005c0002067a10 */
/* 0x000fc40007f5e0ff */
/*00f0*/ IADD3 R2, P3, R2, c[0x0][0x178], RZ ; /* 0x00005e0002027a10 */
/* 0x000fe40007f7e0ff */
/*0100*/ IADD3.X R5, R0.reuse, c[0x0][0x16c], RZ, P1, !PT ; /* 0x00005b0000057a10 */
/* 0x040fe40000ffe4ff */
/*0110*/ IADD3.X R7, R0.reuse, c[0x0][0x174], RZ, P2, !PT ; /* 0x00005d0000077a10 */
/* 0x040fe400017fe4ff */
/*0120*/ IADD3.X R3, R0, c[0x0][0x17c], RZ, P3, !PT ; /* 0x00005f0000037a10 */
/* 0x000fe20001ffe4ff */
/*0130*/ @P0 BRA 0x290 ; /* 0x0000015000000947 */
/* 0x000fea0003800000 */
/*0140*/ IADD3 R0, R9, 0x100, RZ ; /* 0x0000010009007810 */
/* 0x000fe20007ffe0ff */
/*0150*/ BSSY B0, 0x220 ; /* 0x000000c000007945 */
/* 0x000fe20003800000 */
/*0160*/ ISETP.GT.U32.AND P0, PT, R8, R9, PT ; /* 0x000000090800720c */
/* 0x000fe40003f04070 */
/*0170*/ SHF.R.S32.HI R9, RZ, 0x1f, R8 ; /* 0x0000001fff097819 */
/* 0x000fc40000011408 */
/*0180*/ ISETP.GT.U32.AND P1, PT, R8, R0, PT ; /* 0x000000000800720c */
/* 0x000fe40003f24070 */
/*0190*/ ISETP.GT.AND.EX P0, PT, R9.reuse, RZ, PT, P0 ; /* 0x000000ff0900720c */
/* 0x040fe40003f04300 */
/*01a0*/ ISETP.GT.AND.EX P1, PT, R9, RZ, PT, P1 ; /* 0x000000ff0900720c */
/* 0x000fd60003f24310 */
/*01b0*/ @!P0 BRA 0x210 ; /* 0x0000005000008947 */
/* 0x000fea0003800000 */
/*01c0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x000ea8000c1e1b00 */
/*01d0*/ LDG.E.64 R10, [R6.64] ; /* 0x00000004060a7981 */
/* 0x000ea4000c1e1b00 */
/*01e0*/ LOP3.LUT R8, R10, R8, RZ, 0xc0, !PT ; /* 0x000000080a087212 */
/* 0x004fe400078ec0ff */
/*01f0*/ LOP3.LUT R9, R11, R9, RZ, 0xc0, !PT ; /* 0x000000090b097212 */
/* 0x000fca00078ec0ff */
/*0200*/ STG.E.64 [R2.64], R8 ; /* 0x0000000802007986 */
/* 0x0001e4000c101b04 */
/*0210*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0220*/ @!P1 EXIT ; /* 0x000000000000994d */
/* 0x000fea0003800000 */
/*0230*/ LDG.E.64 R6, [R6.64+0x800] ; /* 0x0008000406067981 */
/* 0x000ea8000c1e1b00 */
/*0240*/ LDG.E.64 R4, [R4.64+0x800] ; /* 0x0008000404047981 */
/* 0x000ea4000c1e1b00 */
/*0250*/ LOP3.LUT R8, R6, R4, RZ, 0xc0, !PT ; /* 0x0000000406087212 */
/* 0x005fe400078ec0ff */
/*0260*/ LOP3.LUT R9, R7, R5, RZ, 0xc0, !PT ; /* 0x0000000507097212 */
/* 0x000fca00078ec0ff */
/*0270*/ STG.E.64 [R2.64+0x800], R8 ; /* 0x0008000802007986 */
/* 0x000fe2000c101b04 */
/*0280*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0290*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x000ea8000c1e1b00 */
/*02a0*/ LDG.E.64 R10, [R6.64] ; /* 0x00000004060a7981 */
/* 0x000ea4000c1e1b00 */
/*02b0*/ LOP3.LUT R12, R10, R8, RZ, 0xc0, !PT ; /* 0x000000080a0c7212 */
/* 0x004fe400078ec0ff */
/*02c0*/ LOP3.LUT R13, R11, R9, RZ, 0xc0, !PT ; /* 0x000000090b0d7212 */
/* 0x000fca00078ec0ff */
/*02d0*/ STG.E.64 [R2.64], R12 ; /* 0x0000000c02007986 */
/* 0x000fe8000c101b04 */
/*02e0*/ LDG.E.64 R8, [R6.64+0x800] ; /* 0x0008000406087981 */
/* 0x000ea8000c1e1b00 */
/*02f0*/ LDG.E.64 R10, [R4.64+0x800] ; /* 0x00080004040a7981 */
/* 0x000ea4000c1e1b00 */
/*0300*/ LOP3.LUT R8, R8, R10, RZ, 0xc0, !PT ; /* 0x0000000a08087212 */
/* 0x004fc400078ec0ff */
/*0310*/ LOP3.LUT R9, R9, R11, RZ, 0xc0, !PT ; /* 0x0000000b09097212 */
/* 0x000fca00078ec0ff */
/*0320*/ STG.E.64 [R2.64+0x800], R8 ; /* 0x0008000802007986 */
/* 0x000fe2000c101b04 */
/*0330*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0340*/ BRA 0x340; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0380*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#if __linux__ && defined(__INTEL_COMPILER)
#define __sync_fetch_and_add(ptr,addend) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(ptr)), addend)
#endif
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <time.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "utility.h"
#include "csv.hpp"
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<thrust/sort.h>
using namespace std;
std::vector<string> sv;
std::vector<string> sourceIP;
std::vector<string> destinationIP;
std::vector<string> timestamp;
std::vector<string> IPstring_src;
std::vector<string> IPstring_dst;
std::vector<std::string> split_string_2(std::string str, char del) {
int first = 0;
int last = str.find_first_of(del);
std::vector<std::string> result;
while (first < str.size()) {
std::string subStr(str, first, last - first);
result.push_back(subStr);
first = last + 1;
last = str.find_first_of(del, first);
if (last == std::string::npos) {
last = str.size();
}
}
return result;
}
int main( int argc, char* argv[] ) {
int counter = 0;
struct timespec startTime, endTime, sleepTime;
int N = atoi(argv[2]);
int netmask;
std::map <int,int> found_flag;
const string session_file = std::string(argv[1]);
vector<vector<string>> session_data;
try {
Csv objCsv(session_file);
if (!objCsv.getCsv(session_data)) {
cout << "read ERROR" << endl;
return 1;
}
}
catch (...) {
cout << "EXCEPTION (session)" << endl;
return 1;
}
thrust::host_vector<unsigned long> V1(session_data.size());
thrust::host_vector<unsigned long> V2(session_data.size());
thrust::host_vector<unsigned long> V3(session_data.size());
clock_gettime(CLOCK_REALTIME, &startTime);
sleepTime.tv_sec = 0;
sleepTime.tv_nsec = 123;
for (unsigned int row2 = 0; row2 < session_data.size(); row2++) {
vector<string> rec2 = session_data[row2];
std::string srcIP = rec2[4];
for(size_t c = srcIP.find_first_of("\""); c != string::npos; c = c = srcIP.find_first_of("\"")){
srcIP.erase(c,1);
}
char del2 ='.';
std::string sessionIPstring;
for (const auto subStr : split_string_2(srcIP, del2)) {
unsigned long ipaddr_src;
ipaddr_src = stoul(subStr.c_str());
std::bitset<8> trans = std::bitset<8>(ipaddr_src);
std::string trans_string = trans.to_string();
sessionIPstring = sessionIPstring + trans_string;
}
std::bitset<32> bit_sessionIP(sessionIPstring);
std::bitset<32> bit_sessionIP_cuda(sessionIPstring);
// cout << srcIP << "," << bit_sessionIP.to_ulong() << endl;
/* mask 1 */
int netmask=24;
std::bitset<32> trans2(0xFFFFFFFF);
trans2 <<= 32-netmask;
// trans2 >>= netmask;
bit_sessionIP &= trans2;
std::string rev_string = bit_sessionIP.to_string();
string bs1 = rev_string.substr(0,8);
int bi1 = bitset<8>(bs1).to_ulong();
string bs2 = rev_string.substr(8,8);
int bi2 = bitset<8>(bs2).to_ulong();
string bs3 = rev_string.substr(16,8);
int bi3 = bitset<8>(bs3).to_ulong();
string bs4 = rev_string.substr(24,8);
int bi4 = bitset<8>(bs4).to_ulong();
string revIP = to_string(bi1) + "." + to_string(bi2) + "." + to_string(bi3) + "." + to_string(bi4);
if(row2 < 10)
{
cout << srcIP << "," << bit_sessionIP_cuda.to_ulong() << "," << bit_sessionIP_cuda << "," << trans2 << "," << revIP << endl;
}
}
clock_gettime(CLOCK_REALTIME, &endTime);
if (endTime.tv_nsec < startTime.tv_nsec) {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec - 1, endTime.tv_nsec + 1000000000 - startTime.tv_nsec);
} else {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec,endTime.tv_nsec - startTime.tv_nsec);
}
printf(" sec\n");
clock_gettime(CLOCK_REALTIME, &startTime);
sleepTime.tv_sec = 0;
sleepTime.tv_nsec = 123;
for (unsigned int row2 = 0; row2 < session_data.size(); row2++) {
vector<string> rec2 = session_data[row2];
std::string srcIP = rec2[4];
for(size_t c = srcIP.find_first_of("\""); c != string::npos; c = c = srcIP.find_first_of("\"")){
srcIP.erase(c,1);
}
char del2 ='.';
std::string sessionIPstring;
for (const auto subStr : split_string_2(srcIP, del2)) {
unsigned long ipaddr_src;
ipaddr_src = stoul(subStr.c_str());
std::bitset<8> trans = std::bitset<8>(ipaddr_src);
std::string trans_string = trans.to_string();
sessionIPstring = sessionIPstring + trans_string;
}
std::bitset<32> bit_sessionIP_cuda(sessionIPstring);
V1[row2] = bit_sessionIP_cuda.to_ulong();
}
netmask=24;
std::bitset<32> trans2(0xFFFFFFFF);
// trans2 <<= netmask;
trans2 <<= 32-netmask;
thrust::fill(V2.begin(), V2.end(), trans2.to_ulong());
thrust::device_vector<unsigned long> DV1 = V1;
thrust::device_vector<unsigned long> DV2 = V2;
thrust::device_vector<unsigned long> DV3 = V3;
thrust::transform(DV1.begin(), DV1.end(), DV2.begin(), DV3.begin(), thrust::bit_and<unsigned long>());
for(int i = 0; i < 10; i++)
{
std::bitset<32> bs1(DV1[i]);
std::bitset<32> cuda_sessionIP(DV3[i]);
std::string rev_string_2 = cuda_sessionIP.to_string();
string rev1 = rev_string_2.substr(0,8);
int bi1 = bitset<8>(rev1).to_ulong();
string rev2 = rev_string_2.substr(8,8);
int bi2 = bitset<8>(rev2).to_ulong();
string rev3 = rev_string_2.substr(16,8);
int bi3 = bitset<8>(rev3).to_ulong();
string rev4 = rev_string_2.substr(24,8);
int bi4 = bitset<8>(rev4).to_ulong();
string revIP_2 = to_string(bi1) + "." + to_string(bi2) + "." + to_string(bi3) + "." + to_string(bi4);
std::cout << DV1[i] << "," << bs1 << "," << DV2[i] << "," << trans2 << "," << DV3[i] << "," << revIP_2 << std::endl;
}
clock_gettime(CLOCK_REALTIME, &endTime);
if (endTime.tv_nsec < startTime.tv_nsec) {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec - 1, endTime.tv_nsec + 1000000000 - startTime.tv_nsec);
} else {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec,endTime.tv_nsec - startTime.tv_nsec);
}
printf(" sec\n");
}
|
#if __linux__ && defined(__INTEL_COMPILER)
#define __sync_fetch_and_add(ptr,addend) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(ptr)), addend)
#endif
#include <hip/hip_runtime.h>
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <time.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "utility.h"
#include "csv.hpp"
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<thrust/sort.h>
using namespace std;
std::vector<string> sv;
std::vector<string> sourceIP;
std::vector<string> destinationIP;
std::vector<string> timestamp;
std::vector<string> IPstring_src;
std::vector<string> IPstring_dst;
std::vector<std::string> split_string_2(std::string str, char del) {
int first = 0;
int last = str.find_first_of(del);
std::vector<std::string> result;
while (first < str.size()) {
std::string subStr(str, first, last - first);
result.push_back(subStr);
first = last + 1;
last = str.find_first_of(del, first);
if (last == std::string::npos) {
last = str.size();
}
}
return result;
}
int main( int argc, char* argv[] ) {
int counter = 0;
struct timespec startTime, endTime, sleepTime;
int N = atoi(argv[2]);
int netmask;
std::map <int,int> found_flag;
const string session_file = std::string(argv[1]);
vector<vector<string>> session_data;
try {
Csv objCsv(session_file);
if (!objCsv.getCsv(session_data)) {
cout << "read ERROR" << endl;
return 1;
}
}
catch (...) {
cout << "EXCEPTION (session)" << endl;
return 1;
}
thrust::host_vector<unsigned long> V1(session_data.size());
thrust::host_vector<unsigned long> V2(session_data.size());
thrust::host_vector<unsigned long> V3(session_data.size());
clock_gettime(CLOCK_REALTIME, &startTime);
sleepTime.tv_sec = 0;
sleepTime.tv_nsec = 123;
for (unsigned int row2 = 0; row2 < session_data.size(); row2++) {
vector<string> rec2 = session_data[row2];
std::string srcIP = rec2[4];
for(size_t c = srcIP.find_first_of("\""); c != string::npos; c = c = srcIP.find_first_of("\"")){
srcIP.erase(c,1);
}
char del2 ='.';
std::string sessionIPstring;
for (const auto subStr : split_string_2(srcIP, del2)) {
unsigned long ipaddr_src;
ipaddr_src = stoul(subStr.c_str());
std::bitset<8> trans = std::bitset<8>(ipaddr_src);
std::string trans_string = trans.to_string();
sessionIPstring = sessionIPstring + trans_string;
}
std::bitset<32> bit_sessionIP(sessionIPstring);
std::bitset<32> bit_sessionIP_cuda(sessionIPstring);
// cout << srcIP << "," << bit_sessionIP.to_ulong() << endl;
/* mask 1 */
int netmask=24;
std::bitset<32> trans2(0xFFFFFFFF);
trans2 <<= 32-netmask;
// trans2 >>= netmask;
bit_sessionIP &= trans2;
std::string rev_string = bit_sessionIP.to_string();
string bs1 = rev_string.substr(0,8);
int bi1 = bitset<8>(bs1).to_ulong();
string bs2 = rev_string.substr(8,8);
int bi2 = bitset<8>(bs2).to_ulong();
string bs3 = rev_string.substr(16,8);
int bi3 = bitset<8>(bs3).to_ulong();
string bs4 = rev_string.substr(24,8);
int bi4 = bitset<8>(bs4).to_ulong();
string revIP = to_string(bi1) + "." + to_string(bi2) + "." + to_string(bi3) + "." + to_string(bi4);
if(row2 < 10)
{
cout << srcIP << "," << bit_sessionIP_cuda.to_ulong() << "," << bit_sessionIP_cuda << "," << trans2 << "," << revIP << endl;
}
}
clock_gettime(CLOCK_REALTIME, &endTime);
if (endTime.tv_nsec < startTime.tv_nsec) {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec - 1, endTime.tv_nsec + 1000000000 - startTime.tv_nsec);
} else {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec,endTime.tv_nsec - startTime.tv_nsec);
}
printf(" sec\n");
clock_gettime(CLOCK_REALTIME, &startTime);
sleepTime.tv_sec = 0;
sleepTime.tv_nsec = 123;
for (unsigned int row2 = 0; row2 < session_data.size(); row2++) {
vector<string> rec2 = session_data[row2];
std::string srcIP = rec2[4];
for(size_t c = srcIP.find_first_of("\""); c != string::npos; c = c = srcIP.find_first_of("\"")){
srcIP.erase(c,1);
}
char del2 ='.';
std::string sessionIPstring;
for (const auto subStr : split_string_2(srcIP, del2)) {
unsigned long ipaddr_src;
ipaddr_src = stoul(subStr.c_str());
std::bitset<8> trans = std::bitset<8>(ipaddr_src);
std::string trans_string = trans.to_string();
sessionIPstring = sessionIPstring + trans_string;
}
std::bitset<32> bit_sessionIP_cuda(sessionIPstring);
V1[row2] = bit_sessionIP_cuda.to_ulong();
}
netmask=24;
std::bitset<32> trans2(0xFFFFFFFF);
// trans2 <<= netmask;
trans2 <<= 32-netmask;
thrust::fill(V2.begin(), V2.end(), trans2.to_ulong());
thrust::device_vector<unsigned long> DV1 = V1;
thrust::device_vector<unsigned long> DV2 = V2;
thrust::device_vector<unsigned long> DV3 = V3;
thrust::transform(DV1.begin(), DV1.end(), DV2.begin(), DV3.begin(), thrust::bit_and<unsigned long>());
for(int i = 0; i < 10; i++)
{
std::bitset<32> bs1(DV1[i]);
std::bitset<32> cuda_sessionIP(DV3[i]);
std::string rev_string_2 = cuda_sessionIP.to_string();
string rev1 = rev_string_2.substr(0,8);
int bi1 = bitset<8>(rev1).to_ulong();
string rev2 = rev_string_2.substr(8,8);
int bi2 = bitset<8>(rev2).to_ulong();
string rev3 = rev_string_2.substr(16,8);
int bi3 = bitset<8>(rev3).to_ulong();
string rev4 = rev_string_2.substr(24,8);
int bi4 = bitset<8>(rev4).to_ulong();
string revIP_2 = to_string(bi1) + "." + to_string(bi2) + "." + to_string(bi3) + "." + to_string(bi4);
std::cout << DV1[i] << "," << bs1 << "," << DV2[i] << "," << trans2 << "," << DV3[i] << "," << revIP_2 << std::endl;
}
clock_gettime(CLOCK_REALTIME, &endTime);
if (endTime.tv_nsec < startTime.tv_nsec) {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec - 1, endTime.tv_nsec + 1000000000 - startTime.tv_nsec);
} else {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec,endTime.tv_nsec - startTime.tv_nsec);
}
printf(" sec\n");
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#if __linux__ && defined(__INTEL_COMPILER)
#define __sync_fetch_and_add(ptr,addend) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(ptr)), addend)
#endif
#include <hip/hip_runtime.h>
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <time.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "utility.h"
#include "csv.hpp"
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<thrust/sort.h>
using namespace std;
std::vector<string> sv;
std::vector<string> sourceIP;
std::vector<string> destinationIP;
std::vector<string> timestamp;
std::vector<string> IPstring_src;
std::vector<string> IPstring_dst;
std::vector<std::string> split_string_2(std::string str, char del) {
int first = 0;
int last = str.find_first_of(del);
std::vector<std::string> result;
while (first < str.size()) {
std::string subStr(str, first, last - first);
result.push_back(subStr);
first = last + 1;
last = str.find_first_of(del, first);
if (last == std::string::npos) {
last = str.size();
}
}
return result;
}
int main( int argc, char* argv[] ) {
int counter = 0;
struct timespec startTime, endTime, sleepTime;
int N = atoi(argv[2]);
int netmask;
std::map <int,int> found_flag;
const string session_file = std::string(argv[1]);
vector<vector<string>> session_data;
try {
Csv objCsv(session_file);
if (!objCsv.getCsv(session_data)) {
cout << "read ERROR" << endl;
return 1;
}
}
catch (...) {
cout << "EXCEPTION (session)" << endl;
return 1;
}
thrust::host_vector<unsigned long> V1(session_data.size());
thrust::host_vector<unsigned long> V2(session_data.size());
thrust::host_vector<unsigned long> V3(session_data.size());
clock_gettime(CLOCK_REALTIME, &startTime);
sleepTime.tv_sec = 0;
sleepTime.tv_nsec = 123;
for (unsigned int row2 = 0; row2 < session_data.size(); row2++) {
vector<string> rec2 = session_data[row2];
std::string srcIP = rec2[4];
for(size_t c = srcIP.find_first_of("\""); c != string::npos; c = c = srcIP.find_first_of("\"")){
srcIP.erase(c,1);
}
char del2 ='.';
std::string sessionIPstring;
for (const auto subStr : split_string_2(srcIP, del2)) {
unsigned long ipaddr_src;
ipaddr_src = stoul(subStr.c_str());
std::bitset<8> trans = std::bitset<8>(ipaddr_src);
std::string trans_string = trans.to_string();
sessionIPstring = sessionIPstring + trans_string;
}
std::bitset<32> bit_sessionIP(sessionIPstring);
std::bitset<32> bit_sessionIP_cuda(sessionIPstring);
// cout << srcIP << "," << bit_sessionIP.to_ulong() << endl;
/* mask 1 */
int netmask=24;
std::bitset<32> trans2(0xFFFFFFFF);
trans2 <<= 32-netmask;
// trans2 >>= netmask;
bit_sessionIP &= trans2;
std::string rev_string = bit_sessionIP.to_string();
string bs1 = rev_string.substr(0,8);
int bi1 = bitset<8>(bs1).to_ulong();
string bs2 = rev_string.substr(8,8);
int bi2 = bitset<8>(bs2).to_ulong();
string bs3 = rev_string.substr(16,8);
int bi3 = bitset<8>(bs3).to_ulong();
string bs4 = rev_string.substr(24,8);
int bi4 = bitset<8>(bs4).to_ulong();
string revIP = to_string(bi1) + "." + to_string(bi2) + "." + to_string(bi3) + "." + to_string(bi4);
if(row2 < 10)
{
cout << srcIP << "," << bit_sessionIP_cuda.to_ulong() << "," << bit_sessionIP_cuda << "," << trans2 << "," << revIP << endl;
}
}
clock_gettime(CLOCK_REALTIME, &endTime);
if (endTime.tv_nsec < startTime.tv_nsec) {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec - 1, endTime.tv_nsec + 1000000000 - startTime.tv_nsec);
} else {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec,endTime.tv_nsec - startTime.tv_nsec);
}
printf(" sec\n");
clock_gettime(CLOCK_REALTIME, &startTime);
sleepTime.tv_sec = 0;
sleepTime.tv_nsec = 123;
for (unsigned int row2 = 0; row2 < session_data.size(); row2++) {
vector<string> rec2 = session_data[row2];
std::string srcIP = rec2[4];
for(size_t c = srcIP.find_first_of("\""); c != string::npos; c = c = srcIP.find_first_of("\"")){
srcIP.erase(c,1);
}
char del2 ='.';
std::string sessionIPstring;
for (const auto subStr : split_string_2(srcIP, del2)) {
unsigned long ipaddr_src;
ipaddr_src = stoul(subStr.c_str());
std::bitset<8> trans = std::bitset<8>(ipaddr_src);
std::string trans_string = trans.to_string();
sessionIPstring = sessionIPstring + trans_string;
}
std::bitset<32> bit_sessionIP_cuda(sessionIPstring);
V1[row2] = bit_sessionIP_cuda.to_ulong();
}
netmask=24;
std::bitset<32> trans2(0xFFFFFFFF);
// trans2 <<= netmask;
trans2 <<= 32-netmask;
thrust::fill(V2.begin(), V2.end(), trans2.to_ulong());
thrust::device_vector<unsigned long> DV1 = V1;
thrust::device_vector<unsigned long> DV2 = V2;
thrust::device_vector<unsigned long> DV3 = V3;
thrust::transform(DV1.begin(), DV1.end(), DV2.begin(), DV3.begin(), thrust::bit_and<unsigned long>());
for(int i = 0; i < 10; i++)
{
std::bitset<32> bs1(DV1[i]);
std::bitset<32> cuda_sessionIP(DV3[i]);
std::string rev_string_2 = cuda_sessionIP.to_string();
string rev1 = rev_string_2.substr(0,8);
int bi1 = bitset<8>(rev1).to_ulong();
string rev2 = rev_string_2.substr(8,8);
int bi2 = bitset<8>(rev2).to_ulong();
string rev3 = rev_string_2.substr(16,8);
int bi3 = bitset<8>(rev3).to_ulong();
string rev4 = rev_string_2.substr(24,8);
int bi4 = bitset<8>(rev4).to_ulong();
string revIP_2 = to_string(bi1) + "." + to_string(bi2) + "." + to_string(bi3) + "." + to_string(bi4);
std::cout << DV1[i] << "," << bs1 << "," << DV2[i] << "," << trans2 << "," << DV3[i] << "," << revIP_2 << std::endl;
}
clock_gettime(CLOCK_REALTIME, &endTime);
if (endTime.tv_nsec < startTime.tv_nsec) {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec - 1, endTime.tv_nsec + 1000000000 - startTime.tv_nsec);
} else {
printf("%10ld.%09ld", endTime.tv_sec - startTime.tv_sec,endTime.tv_nsec - startTime.tv_nsec);
}
printf(" sec\n");
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,comdat
.protected _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_
.globl _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_
.p2align 8
.type _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,@function
_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_:
s_load_b128 s[4:7], s[0:1], 0x20
s_lshl_b32 s2, s15, 8
s_waitcnt lgkmcnt(0)
s_add_u32 s2, s2, s6
s_addc_u32 s3, 0, s7
s_sub_u32 s4, s4, s2
s_subb_u32 s5, s5, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i64_e64 s5, 0x100, s[4:5]
s_and_b32 s5, s5, exec_lo
s_cselect_b32 s4, s4, 0x100
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_cmp_gt_u32_e32 vcc_lo, s4, v0
s_cmpk_eq_i32 s4, 0x100
s_cselect_b32 s4, -1, 0
s_or_b32 s4, s4, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s5, s4
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_add_co_u32 v0, s2, s2, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v1, null, s3, 0, s2
s_load_b64 s[0:1], s[0:1], 0x10
v_lshlrev_b64 v[0:1], 3, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
flat_load_b64 v[2:3], v[2:3]
flat_load_b64 v[4:5], v[4:5]
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0) lgkmcnt(0)
v_and_b32_e32 v3, v5, v3
v_and_b32_e32 v2, v4, v2
flat_store_b64 v[0:1], v[2:3]
.LBB0_2:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 48
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,comdat
.Lfunc_end0:
.size _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_, .Lfunc_end0-_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 32
.value_kind: by_value
- .offset: 32
.size: 8
.value_kind: by_value
- .offset: 40
.size: 8
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 48
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 256
.name: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : _ZN3cub17CUB_200700_800_NS6detail8for_each13static_kernelINS2_12policy_hub_t12policy_350_tElN6thrust20THRUST_200700_800_NS8cuda_cub11__transform18binary_transform_fINS7_6detail15normal_iteratorINS7_10device_ptrImEEEESF_SF_NS9_14no_stencil_tagENS7_7bit_andImEENS9_21always_true_predicateEEEEEvT0_T1_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e620000002100 */
/*0040*/ IMAD.WIDE.U32 R2, R2, 0x200, RZ ; /* 0x0000020002027825 */
/* 0x001fca00078e00ff */
/*0050*/ IADD3 R0, P1, R2.reuse, R9, RZ ; /* 0x0000000902007210 */
/* 0x042fe40007f3e0ff */
/*0060*/ IADD3 R8, P0, -R2, c[0x0][0x160], RZ ; /* 0x0000580002087a10 */
/* 0x000fc60007f1e1ff */
/*0070*/ IMAD.X R5, RZ, RZ, R3, P1 ; /* 0x000000ffff057224 */
/* 0x000fe200008e0603 */
/*0080*/ IADD3.X R3, ~R3, c[0x0][0x164], RZ, P0, !PT ; /* 0x0000590003037a10 */
/* 0x000fe200007fe5ff */
/*0090*/ IMAD.SHL.U32 R2, R0, 0x8, RZ ; /* 0x0000000800027824 */
/* 0x000fe200078e00ff */
/*00a0*/ ISETP.GT.U32.AND P0, PT, R8, 0x1ff, PT ; /* 0x000001ff0800780c */
/* 0x000fe40003f04070 */
/*00b0*/ SHF.L.U64.HI R0, R0, 0x3, R5 ; /* 0x0000000300007819 */
/* 0x000fe40000010205 */
/*00c0*/ ISETP.GT.AND.EX P0, PT, R3, RZ, PT, P0 ; /* 0x000000ff0300720c */
/* 0x000fe40003f04300 */
/*00d0*/ IADD3 R4, P1, R2.reuse, c[0x0][0x168], RZ ; /* 0x00005a0002047a10 */
/* 0x040fe40007f3e0ff */
/*00e0*/ IADD3 R6, P2, R2, c[0x0][0x170], RZ ; /* 0x00005c0002067a10 */
/* 0x000fc40007f5e0ff */
/*00f0*/ IADD3 R2, P3, R2, c[0x0][0x178], RZ ; /* 0x00005e0002027a10 */
/* 0x000fe40007f7e0ff */
/*0100*/ IADD3.X R5, R0.reuse, c[0x0][0x16c], RZ, P1, !PT ; /* 0x00005b0000057a10 */
/* 0x040fe40000ffe4ff */
/*0110*/ IADD3.X R7, R0.reuse, c[0x0][0x174], RZ, P2, !PT ; /* 0x00005d0000077a10 */
/* 0x040fe400017fe4ff */
/*0120*/ IADD3.X R3, R0, c[0x0][0x17c], RZ, P3, !PT ; /* 0x00005f0000037a10 */
/* 0x000fe20001ffe4ff */
/*0130*/ @P0 BRA 0x290 ; /* 0x0000015000000947 */
/* 0x000fea0003800000 */
/*0140*/ IADD3 R0, R9, 0x100, RZ ; /* 0x0000010009007810 */
/* 0x000fe20007ffe0ff */
/*0150*/ BSSY B0, 0x220 ; /* 0x000000c000007945 */
/* 0x000fe20003800000 */
/*0160*/ ISETP.GT.U32.AND P0, PT, R8, R9, PT ; /* 0x000000090800720c */
/* 0x000fe40003f04070 */
/*0170*/ SHF.R.S32.HI R9, RZ, 0x1f, R8 ; /* 0x0000001fff097819 */
/* 0x000fc40000011408 */
/*0180*/ ISETP.GT.U32.AND P1, PT, R8, R0, PT ; /* 0x000000000800720c */
/* 0x000fe40003f24070 */
/*0190*/ ISETP.GT.AND.EX P0, PT, R9.reuse, RZ, PT, P0 ; /* 0x000000ff0900720c */
/* 0x040fe40003f04300 */
/*01a0*/ ISETP.GT.AND.EX P1, PT, R9, RZ, PT, P1 ; /* 0x000000ff0900720c */
/* 0x000fd60003f24310 */
/*01b0*/ @!P0 BRA 0x210 ; /* 0x0000005000008947 */
/* 0x000fea0003800000 */
/*01c0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x000ea8000c1e1b00 */
/*01d0*/ LDG.E.64 R10, [R6.64] ; /* 0x00000004060a7981 */
/* 0x000ea4000c1e1b00 */
/*01e0*/ LOP3.LUT R8, R10, R8, RZ, 0xc0, !PT ; /* 0x000000080a087212 */
/* 0x004fe400078ec0ff */
/*01f0*/ LOP3.LUT R9, R11, R9, RZ, 0xc0, !PT ; /* 0x000000090b097212 */
/* 0x000fca00078ec0ff */
/*0200*/ STG.E.64 [R2.64], R8 ; /* 0x0000000802007986 */
/* 0x0001e4000c101b04 */
/*0210*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0220*/ @!P1 EXIT ; /* 0x000000000000994d */
/* 0x000fea0003800000 */
/*0230*/ LDG.E.64 R6, [R6.64+0x800] ; /* 0x0008000406067981 */
/* 0x000ea8000c1e1b00 */
/*0240*/ LDG.E.64 R4, [R4.64+0x800] ; /* 0x0008000404047981 */
/* 0x000ea4000c1e1b00 */
/*0250*/ LOP3.LUT R8, R6, R4, RZ, 0xc0, !PT ; /* 0x0000000406087212 */
/* 0x005fe400078ec0ff */
/*0260*/ LOP3.LUT R9, R7, R5, RZ, 0xc0, !PT ; /* 0x0000000507097212 */
/* 0x000fca00078ec0ff */
/*0270*/ STG.E.64 [R2.64+0x800], R8 ; /* 0x0008000802007986 */
/* 0x000fe2000c101b04 */
/*0280*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0290*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x000ea8000c1e1b00 */
/*02a0*/ LDG.E.64 R10, [R6.64] ; /* 0x00000004060a7981 */
/* 0x000ea4000c1e1b00 */
/*02b0*/ LOP3.LUT R12, R10, R8, RZ, 0xc0, !PT ; /* 0x000000080a0c7212 */
/* 0x004fe400078ec0ff */
/*02c0*/ LOP3.LUT R13, R11, R9, RZ, 0xc0, !PT ; /* 0x000000090b0d7212 */
/* 0x000fca00078ec0ff */
/*02d0*/ STG.E.64 [R2.64], R12 ; /* 0x0000000c02007986 */
/* 0x000fe8000c101b04 */
/*02e0*/ LDG.E.64 R8, [R6.64+0x800] ; /* 0x0008000406087981 */
/* 0x000ea8000c1e1b00 */
/*02f0*/ LDG.E.64 R10, [R4.64+0x800] ; /* 0x00080004040a7981 */
/* 0x000ea4000c1e1b00 */
/*0300*/ LOP3.LUT R8, R8, R10, RZ, 0xc0, !PT ; /* 0x0000000a08087212 */
/* 0x004fc400078ec0ff */
/*0310*/ LOP3.LUT R9, R9, R11, RZ, 0xc0, !PT ; /* 0x0000000b09097212 */
/* 0x000fca00078ec0ff */
/*0320*/ STG.E.64 [R2.64+0x800], R8 ; /* 0x0008000802007986 */
/* 0x000fe2000c101b04 */
/*0330*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0340*/ BRA 0x340; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0380*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _ZN3cub17CUB_200700_800_NS11EmptyKernelIvEEvv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,comdat
.protected _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_
.globl _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_
.p2align 8
.type _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,@function
_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_:
s_load_b128 s[4:7], s[0:1], 0x20
s_lshl_b32 s2, s15, 8
s_waitcnt lgkmcnt(0)
s_add_u32 s2, s2, s6
s_addc_u32 s3, 0, s7
s_sub_u32 s4, s4, s2
s_subb_u32 s5, s5, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i64_e64 s5, 0x100, s[4:5]
s_and_b32 s5, s5, exec_lo
s_cselect_b32 s4, s4, 0x100
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
v_cmp_gt_u32_e32 vcc_lo, s4, v0
s_cmpk_eq_i32 s4, 0x100
s_cselect_b32 s4, -1, 0
s_or_b32 s4, s4, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s5, s4
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_add_co_u32 v0, s2, s2, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v1, null, s3, 0, s2
s_load_b64 s[0:1], s[0:1], 0x10
v_lshlrev_b64 v[0:1], 3, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
flat_load_b64 v[2:3], v[2:3]
flat_load_b64 v[4:5], v[4:5]
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0) lgkmcnt(0)
v_and_b32_e32 v3, v5, v3
v_and_b32_e32 v2, v4, v2
flat_store_b64 v[0:1], v[2:3]
.LBB0_2:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 48
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.section .text._ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,"axG",@progbits,_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_,comdat
.Lfunc_end0:
.size _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_, .Lfunc_end0-_ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 32
.value_kind: by_value
- .offset: 32
.size: 8
.value_kind: by_value
- .offset: 40
.size: 8
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 48
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 256
.name: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _ZN6thrust11hip_rocprim14__parallel_for6kernelILj256ENS0_11__transform18binary_transform_fINS_6detail15normal_iteratorINS_10device_ptrImEEEES9_S9_NS3_14no_stencil_tagENS_7bit_andImEENS3_21always_true_predicateEEElLj1EEEvT0_T1_SG_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <thrust/reduce.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sort.h>
#include <iostream>
typedef thrust::device_vector<int> int_vec;
template <typename Vector>
void print_vector(const Vector& v) {
typedef typename Vector::value_type T;
thrust::copy(v.begin(), v.end(), std::ostream_iterator<T>(std::cout, " "));
std::cout << std::endl;
}
void sparse_histogram(int_vec &data,
int_vec &hist_values,
int_vec &hist_counts) {
// sort data
thrust::sort(data.begin(), data.end());
// number of bins = number of unique values in data (assumes data.size() > 0)
int num_bins = thrust::inner_product(data.begin(), data.end() - 1,
data.begin() + 1,
1,
thrust::plus<int>(),
thrust::not_equal_to<int>());
// resize histogram to number of unique elements
hist_values.resize(num_bins);
hist_counts.resize(num_bins);
// calculate number of elements per bin
thrust::reduce_by_key(data.begin(), data.end(),
thrust::constant_iterator<int>(1),
hist_values.begin(),
hist_counts.begin());
}
void histogram(int_vec &data, int_vec &dense_hist) {
thrust::device_vector<int> sparse_hist_values;
thrust::device_vector<int> sparse_hist_counts;
sparse_histogram(data, sparse_hist_values, sparse_hist_counts);
thrust::fill(dense_hist.begin(), dense_hist.end(), 0);
thrust::scatter(sparse_hist_counts.begin(),
sparse_hist_counts.end(),
sparse_hist_values.begin(),
dense_hist.begin());
}
int main() {
const int num_bins = 10;
thrust::host_vector<int> H(10);
H[0] = 1;
H[1] = 1;
H[2] = 3;
H[3] = 6;
H[4] = 1;
H[5] = 1;
H[6] = 5;
H[7] = 6;
H[8] = 7;
H[9] = 6;
// Copy host_vector H to device_vector D
thrust::device_vector<int> D = H;
int_vec hist(num_bins);
histogram(D, hist);
std::cout << "Values:" << std::endl;
print_vector(D);
std::cout << "Histogram:" << std::endl;
print_vector(hist);
return 0;
}
/*
https://www.youtube.com/watch?v=cGffGYBbtbk
https://github.com/thrust/thrust/blob/master/examples/histogram.cu
*/
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <thrust/reduce.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sort.h>
#include <iostream>
typedef thrust::device_vector<int> int_vec;
template <typename Vector>
void print_vector(const Vector& v) {
typedef typename Vector::value_type T;
thrust::copy(v.begin(), v.end(), std::ostream_iterator<T>(std::cout, " "));
std::cout << std::endl;
}
void sparse_histogram(int_vec &data,
int_vec &hist_values,
int_vec &hist_counts) {
// sort data
thrust::sort(data.begin(), data.end());
// number of bins = number of unique values in data (assumes data.size() > 0)
int num_bins = thrust::inner_product(data.begin(), data.end() - 1,
data.begin() + 1,
1,
thrust::plus<int>(),
thrust::not_equal_to<int>());
// resize histogram to number of unique elements
hist_values.resize(num_bins);
hist_counts.resize(num_bins);
// calculate number of elements per bin
thrust::reduce_by_key(data.begin(), data.end(),
thrust::constant_iterator<int>(1),
hist_values.begin(),
hist_counts.begin());
}
void histogram(int_vec &data, int_vec &dense_hist) {
thrust::device_vector<int> sparse_hist_values;
thrust::device_vector<int> sparse_hist_counts;
sparse_histogram(data, sparse_hist_values, sparse_hist_counts);
thrust::fill(dense_hist.begin(), dense_hist.end(), 0);
thrust::scatter(sparse_hist_counts.begin(),
sparse_hist_counts.end(),
sparse_hist_values.begin(),
dense_hist.begin());
}
int main() {
const int num_bins = 10;
thrust::host_vector<int> H(10);
H[0] = 1;
H[1] = 1;
H[2] = 3;
H[3] = 6;
H[4] = 1;
H[5] = 1;
H[6] = 5;
H[7] = 6;
H[8] = 7;
H[9] = 6;
// Copy host_vector H to device_vector D
thrust::device_vector<int> D = H;
int_vec hist(num_bins);
histogram(D, hist);
std::cout << "Values:" << std::endl;
print_vector(D);
std::cout << "Histogram:" << std::endl;
print_vector(hist);
return 0;
}
/*
https://www.youtube.com/watch?v=cGffGYBbtbk
https://github.com/thrust/thrust/blob/master/examples/histogram.cu
*/
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#include <stdio.h>
extern "C"
int example(int a, int b){
return a + b;
}
extern "C"
void example2(int input){
printf("%s%d\n", "hello ", input);
}
extern "C"
void gpuTest(void){
int n = 10;
int a[n], b[n], c[n];
int *dev_a, *dev_b, *dev_c;
cudaMalloc( (void**)&dev_a, n * sizeof(int) );
cudaMalloc( (void**)&dev_b, n * sizeof(int) );
cudaMalloc( (void**)&dev_c, n * sizeof(int) );
for (int i=0; i<n; i++) {
a[i] = -i;
b[i] = i * i;
}
cudaMemcpy( dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, n * sizeof(int), cudaMemcpyHostToDevice );
//add<<<n,1>>>( dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
cudaMemcpy( c, dev_c, n * sizeof(int), cudaMemcpyDeviceToHost );
// display the results
for (int i=0; i<n; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
}
/*
int main(){
gpuTest();
printf("hello");
}
*/
|
code for sm_80
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#include <stdio.h>
extern "C"
int example(int a, int b){
return a + b;
}
extern "C"
void example2(int input){
printf("%s%d\n", "hello ", input);
}
extern "C"
void gpuTest(void){
int n = 10;
int a[n], b[n], c[n];
int *dev_a, *dev_b, *dev_c;
cudaMalloc( (void**)&dev_a, n * sizeof(int) );
cudaMalloc( (void**)&dev_b, n * sizeof(int) );
cudaMalloc( (void**)&dev_c, n * sizeof(int) );
for (int i=0; i<n; i++) {
a[i] = -i;
b[i] = i * i;
}
cudaMemcpy( dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, n * sizeof(int), cudaMemcpyHostToDevice );
//add<<<n,1>>>( dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
cudaMemcpy( c, dev_c, n * sizeof(int), cudaMemcpyDeviceToHost );
// display the results
for (int i=0; i<n; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
}
/*
int main(){
gpuTest();
printf("hello");
}
*/
|
.file "tmpxft_00034919_00000000-6_nyTest.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl example
.type example, @function
example:
.LFB2057:
.cfi_startproc
endbr64
leal (%rdi,%rsi), %eax
ret
.cfi_endproc
.LFE2057:
.size example, .-example
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "hello "
.LC1:
.string "%s%d\n"
.text
.globl example2
.type example2, @function
example2:
.LFB2058:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl %edi, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size example2, .-example2
.section .rodata.str1.1
.LC2:
.string "%d + %d = %d\n"
.text
.globl gpuTest
.type gpuTest, @function
gpuTest:
.LFB2059:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $184, %rsp
.cfi_def_cfa_offset 208
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
movl $0, %eax
.L7:
movl %eax, %edx
negl %edx
movl %edx, 32(%rsp,%rax,4)
movl %eax, %edx
imull %eax, %edx
movl %edx, 80(%rsp,%rax,4)
addq $1, %rax
cmpq $10, %rax
jne .L7
leaq 32(%rsp), %rsi
movl $1, %ecx
movl $40, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 80(%rsp), %rsi
movl $1, %ecx
movl $40, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 128(%rsp), %rdi
movl $2, %ecx
movl $40, %edx
movq 24(%rsp), %rsi
call cudaMemcpy@PLT
movl $0, %ebx
leaq .LC2(%rip), %rbp
.L8:
movl 80(%rsp,%rbx), %ecx
movl 32(%rsp,%rbx), %edx
movl 128(%rsp,%rbx), %r8d
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq $40, %rbx
jne .L8
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L13
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size gpuTest, .-gpuTest
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include <stdio.h>
extern "C"
int example(int a, int b){
return a + b;
}
extern "C"
void example2(int input){
printf("%s%d\n", "hello ", input);
}
extern "C"
void gpuTest(void){
int n = 10;
int a[n], b[n], c[n];
int *dev_a, *dev_b, *dev_c;
cudaMalloc( (void**)&dev_a, n * sizeof(int) );
cudaMalloc( (void**)&dev_b, n * sizeof(int) );
cudaMalloc( (void**)&dev_c, n * sizeof(int) );
for (int i=0; i<n; i++) {
a[i] = -i;
b[i] = i * i;
}
cudaMemcpy( dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, n * sizeof(int), cudaMemcpyHostToDevice );
//add<<<n,1>>>( dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
cudaMemcpy( c, dev_c, n * sizeof(int), cudaMemcpyDeviceToHost );
// display the results
for (int i=0; i<n; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
}
/*
int main(){
gpuTest();
printf("hello");
}
*/
|
#include <hip/hip_runtime.h>
#include <stdio.h>
extern "C"
int example(int a, int b){
return a + b;
}
extern "C"
void example2(int input){
printf("%s%d\n", "hello ", input);
}
extern "C"
void gpuTest(void){
int n = 10;
int a[n], b[n], c[n];
int *dev_a, *dev_b, *dev_c;
hipMalloc( (void**)&dev_a, n * sizeof(int) );
hipMalloc( (void**)&dev_b, n * sizeof(int) );
hipMalloc( (void**)&dev_c, n * sizeof(int) );
for (int i=0; i<n; i++) {
a[i] = -i;
b[i] = i * i;
}
hipMemcpy( dev_a, a, n * sizeof(int), hipMemcpyHostToDevice );
hipMemcpy( dev_b, b, n * sizeof(int), hipMemcpyHostToDevice );
//add<<<n,1>>>( dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
hipMemcpy( c, dev_c, n * sizeof(int), hipMemcpyDeviceToHost );
// display the results
for (int i=0; i<n; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
}
/*
int main(){
gpuTest();
printf("hello");
}
*/
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
#include <stdio.h>
extern "C"
int example(int a, int b){
return a + b;
}
extern "C"
void example2(int input){
printf("%s%d\n", "hello ", input);
}
extern "C"
void gpuTest(void){
int n = 10;
int a[n], b[n], c[n];
int *dev_a, *dev_b, *dev_c;
hipMalloc( (void**)&dev_a, n * sizeof(int) );
hipMalloc( (void**)&dev_b, n * sizeof(int) );
hipMalloc( (void**)&dev_c, n * sizeof(int) );
for (int i=0; i<n; i++) {
a[i] = -i;
b[i] = i * i;
}
hipMemcpy( dev_a, a, n * sizeof(int), hipMemcpyHostToDevice );
hipMemcpy( dev_b, b, n * sizeof(int), hipMemcpyHostToDevice );
//add<<<n,1>>>( dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
hipMemcpy( c, dev_c, n * sizeof(int), hipMemcpyDeviceToHost );
// display the results
for (int i=0; i<n; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
}
/*
int main(){
gpuTest();
printf("hello");
}
*/
|
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
#include <stdio.h>
extern "C"
int example(int a, int b){
return a + b;
}
extern "C"
void example2(int input){
printf("%s%d\n", "hello ", input);
}
extern "C"
void gpuTest(void){
int n = 10;
int a[n], b[n], c[n];
int *dev_a, *dev_b, *dev_c;
hipMalloc( (void**)&dev_a, n * sizeof(int) );
hipMalloc( (void**)&dev_b, n * sizeof(int) );
hipMalloc( (void**)&dev_c, n * sizeof(int) );
for (int i=0; i<n; i++) {
a[i] = -i;
b[i] = i * i;
}
hipMemcpy( dev_a, a, n * sizeof(int), hipMemcpyHostToDevice );
hipMemcpy( dev_b, b, n * sizeof(int), hipMemcpyHostToDevice );
//add<<<n,1>>>( dev_a, dev_b, dev_c );
// copy the array 'c' back from the GPU to the CPU
hipMemcpy( c, dev_c, n * sizeof(int), hipMemcpyDeviceToHost );
// display the results
for (int i=0; i<n; i++) {
printf( "%d + %d = %d\n", a[i], b[i], c[i] );
}
// free the memory allocated on the GPU
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
}
/*
int main(){
gpuTest();
printf("hello");
}
*/
|
.text
.file "nyTest.hip"
.globl example # -- Begin function example
.p2align 4, 0x90
.type example,@function
example: # @example
.cfi_startproc
# %bb.0:
# kill: def $esi killed $esi def $rsi
# kill: def $edi killed $edi def $rdi
leal (%rdi,%rsi), %eax
retq
.Lfunc_end0:
.size example, .Lfunc_end0-example
.cfi_endproc
# -- End function
.globl example2 # -- Begin function example2
.p2align 4, 0x90
.type example2,@function
example2: # @example2
.cfi_startproc
# %bb.0:
movl %edi, %edx
movl $.L.str, %edi
movl $.L.str.1, %esi
xorl %eax, %eax
jmp printf # TAILCALL
.Lfunc_end1:
.size example2, .Lfunc_end1-example2
.cfi_endproc
# -- End function
.globl gpuTest # -- Begin function gpuTest
.p2align 4, 0x90
.type gpuTest,@function
gpuTest: # @gpuTest
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $176, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -16
leaq 24(%rsp), %rdi
movl $40, %esi
callq hipMalloc
leaq 16(%rsp), %rdi
movl $40, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $40, %esi
callq hipMalloc
xorl %eax, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
movl %eax, 80(%rsp,%rcx,4)
movl %ecx, %edx
imull %ecx, %edx
movl %edx, 32(%rsp,%rcx,4)
incq %rcx
decl %eax
cmpq $10, %rcx
jne .LBB2_1
# %bb.2:
movq 24(%rsp), %rdi
leaq 80(%rsp), %rsi
movl $40, %edx
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
leaq 32(%rsp), %rsi
movl $40, %edx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rsi
leaq 128(%rsp), %rdi
movl $40, %edx
movl $2, %ecx
callq hipMemcpy
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_3: # =>This Inner Loop Header: Depth=1
movl 80(%rsp,%rbx,4), %esi
movl 32(%rsp,%rbx,4), %edx
movl 128(%rsp,%rbx,4), %ecx
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $10, %rbx
jne .LBB2_3
# %bb.4:
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $176, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size gpuTest, .Lfunc_end2-gpuTest
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%s%d\n"
.size .L.str, 6
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "hello "
.size .L.str.1, 7
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%d + %d = %d\n"
.size .L.str.2, 14
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
|
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_00034919_00000000-6_nyTest.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl example
.type example, @function
example:
.LFB2057:
.cfi_startproc
endbr64
leal (%rdi,%rsi), %eax
ret
.cfi_endproc
.LFE2057:
.size example, .-example
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "hello "
.LC1:
.string "%s%d\n"
.text
.globl example2
.type example2, @function
example2:
.LFB2058:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl %edi, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size example2, .-example2
.section .rodata.str1.1
.LC2:
.string "%d + %d = %d\n"
.text
.globl gpuTest
.type gpuTest, @function
gpuTest:
.LFB2059:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $184, %rsp
.cfi_def_cfa_offset 208
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
movl $0, %eax
.L7:
movl %eax, %edx
negl %edx
movl %edx, 32(%rsp,%rax,4)
movl %eax, %edx
imull %eax, %edx
movl %edx, 80(%rsp,%rax,4)
addq $1, %rax
cmpq $10, %rax
jne .L7
leaq 32(%rsp), %rsi
movl $1, %ecx
movl $40, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 80(%rsp), %rsi
movl $1, %ecx
movl $40, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 128(%rsp), %rdi
movl $2, %ecx
movl $40, %edx
movq 24(%rsp), %rsi
call cudaMemcpy@PLT
movl $0, %ebx
leaq .LC2(%rip), %rbp
.L8:
movl 80(%rsp,%rbx), %ecx
movl 32(%rsp,%rbx), %edx
movl 128(%rsp,%rbx), %r8d
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq $40, %rbx
jne .L8
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L13
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size gpuTest, .-gpuTest
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "nyTest.hip"
.globl example # -- Begin function example
.p2align 4, 0x90
.type example,@function
example: # @example
.cfi_startproc
# %bb.0:
# kill: def $esi killed $esi def $rsi
# kill: def $edi killed $edi def $rdi
leal (%rdi,%rsi), %eax
retq
.Lfunc_end0:
.size example, .Lfunc_end0-example
.cfi_endproc
# -- End function
.globl example2 # -- Begin function example2
.p2align 4, 0x90
.type example2,@function
example2: # @example2
.cfi_startproc
# %bb.0:
movl %edi, %edx
movl $.L.str, %edi
movl $.L.str.1, %esi
xorl %eax, %eax
jmp printf # TAILCALL
.Lfunc_end1:
.size example2, .Lfunc_end1-example2
.cfi_endproc
# -- End function
.globl gpuTest # -- Begin function gpuTest
.p2align 4, 0x90
.type gpuTest,@function
gpuTest: # @gpuTest
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $176, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -16
leaq 24(%rsp), %rdi
movl $40, %esi
callq hipMalloc
leaq 16(%rsp), %rdi
movl $40, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $40, %esi
callq hipMalloc
xorl %eax, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB2_1: # =>This Inner Loop Header: Depth=1
movl %eax, 80(%rsp,%rcx,4)
movl %ecx, %edx
imull %ecx, %edx
movl %edx, 32(%rsp,%rcx,4)
incq %rcx
decl %eax
cmpq $10, %rcx
jne .LBB2_1
# %bb.2:
movq 24(%rsp), %rdi
leaq 80(%rsp), %rsi
movl $40, %edx
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
leaq 32(%rsp), %rsi
movl $40, %edx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rsi
leaq 128(%rsp), %rdi
movl $40, %edx
movl $2, %ecx
callq hipMemcpy
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_3: # =>This Inner Loop Header: Depth=1
movl 80(%rsp,%rbx,4), %esi
movl 32(%rsp,%rbx,4), %edx
movl 128(%rsp,%rbx,4), %ecx
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $10, %rbx
jne .LBB2_3
# %bb.4:
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $176, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size gpuTest, .Lfunc_end2-gpuTest
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%s%d\n"
.size .L.str, 6
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "hello "
.size .L.str.1, 7
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%d + %d = %d\n"
.size .L.str.2, 14
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#include <cassert>
#include <cstdio>
const int BLOCK_SIZE = 16;
#define getX (conv ? blockI[i][threadIdx.x] : blockI[threadIdx.y][i])
#define getW (conv ? blockW[i][threadIdx.y] : blockW[i][threadIdx.x])
template <bool conv>
__global__ void inf_dist_forward_kernel(const float* __restrict__ input, const float* __restrict__ weight,
int B, int CO_div_G, int CI_div_G, int HW, int G,
float* __restrict__ output, int* __restrict__ pos) {
float output_reg = 0;
int pos_reg = 0;
int b_hw = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.x : threadIdx.y);
int b = b_hw / HW;
int hw = b_hw % HW;
int write_co = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
int read_w_co = blockIdx.x * BLOCK_SIZE + threadIdx.y;
__shared__ float blockI[BLOCK_SIZE][BLOCK_SIZE]; // CI * B if conv else B * CI
__shared__ float blockW[BLOCK_SIZE][BLOCK_SIZE + 2]; // CI * CO
int k;
for (k = 0; k < (CI_div_G & ~(BLOCK_SIZE - 1)); k += BLOCK_SIZE) {
if (b < B) {
if (conv) blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
else blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
}
if (read_w_co < CO_div_G)
blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + read_w_co) * CI_div_G + k + threadIdx.x];
__syncthreads();
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; i++) {
float t = getX - getW;
float abs_t = abs(t);
if (abs_t > output_reg) {
output_reg = abs_t;
pos_reg = k + i + (t >= 0 ? 0 : 1 << 31);
}
}
__syncthreads();
}
if (CI_div_G & (BLOCK_SIZE - 1)) {
if (b < B) {
if (conv && k + threadIdx.y < CI_div_G) blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
if (!conv && k + threadIdx.x < CI_div_G) blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
}
if (k + threadIdx.x < CI_div_G && read_w_co < CO_div_G)
blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + read_w_co) * CI_div_G + k + threadIdx.x];
__syncthreads();
#pragma unroll
for (int i = 0; i < (CI_div_G & (BLOCK_SIZE - 1)); i++) {
float t = getX - getW;
float abs_t = abs(t);
if (abs_t > output_reg) {
output_reg = abs_t;
pos_reg = k + i + (t >= 0 ? 0 : 1 << 31);
}
}
}
if (b < B && write_co < CO_div_G) {
output[((b * G + blockIdx.z) * CO_div_G + write_co) * HW + hw] = output_reg;
pos[((b * G + blockIdx.z) * CO_div_G + write_co) * HW + hw] = pos_reg;
}
}
#define getXL (conv ? blockIL[i][threadIdx.x] : blockIL[threadIdx.y][i])
#define getXU (conv ? blockIU[i][threadIdx.x] : blockIU[threadIdx.y][i])
template <bool conv>
__global__ void inf_dist_bound_forward_kernel(const float* __restrict__ input_lower, const float* __restrict__ input_upper,
const float* __restrict__ weight,
int B, int CO_div_G, int CI_div_G, int HW, int G,
float* __restrict__ output_lower, float* __restrict__ output_upper) {
float output_regL = 0, output_regU= 0;
int b_hw = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.x : threadIdx.y);
int b = b_hw / HW;
int hw = b_hw % HW;
int write_co = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
int read_w_co = blockIdx.x * BLOCK_SIZE + threadIdx.y;
__shared__ float blockIL[BLOCK_SIZE][BLOCK_SIZE], blockIU[BLOCK_SIZE][BLOCK_SIZE]; // CI * B if conv else B * CI
__shared__ float blockW[BLOCK_SIZE][BLOCK_SIZE + 2]; // CI * CO
int k;
for (k = 0; k < (CI_div_G & ~(BLOCK_SIZE - 1)); k += BLOCK_SIZE) {
if (b < B) {
if (conv) {
blockIL[threadIdx.y][threadIdx.x] = input_lower[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
blockIU[threadIdx.y][threadIdx.x] = input_upper[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
}
else {
blockIL[threadIdx.y][threadIdx.x] = input_lower[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
blockIU[threadIdx.y][threadIdx.x] = input_upper[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
}
}
if (read_w_co < CO_div_G)
blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + read_w_co) * CI_div_G + k + threadIdx.x];
__syncthreads();
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; i++) {
float tL = getXL - getW, tU = getXU - getW;
float abs_tL = abs(tL), abs_tU = abs(tU);
output_regU = max(output_regU, max(abs_tL, abs_tU));
if (!(tL < 0 && tU > 0))
output_regL = max(output_regL, min(abs_tL, abs_tU));
}
__syncthreads();
}
if (CI_div_G & (BLOCK_SIZE - 1)) {
if (b < B) {
if (conv && k + threadIdx.y < CI_div_G) {
blockIL[threadIdx.y][threadIdx.x] = input_lower[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
blockIU[threadIdx.y][threadIdx.x] = input_upper[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
}
if (!conv && k + threadIdx.x < CI_div_G) {
blockIL[threadIdx.y][threadIdx.x] = input_lower[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
blockIU[threadIdx.y][threadIdx.x] = input_upper[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
}
}
if (k + threadIdx.x < CI_div_G && read_w_co < CO_div_G)
blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + read_w_co) * CI_div_G + k + threadIdx.x];
__syncthreads();
#pragma unroll
for (int i = 0; i < (CI_div_G & (BLOCK_SIZE - 1)); i++) {
float tL = getXL - getW, tU = getXU - getW;
float abs_tL = abs(tL), abs_tU = abs(tU);
output_regU = max(output_regU, max(abs_tL, abs_tU));
if (!(tL < 0 && tU > 0))
output_regL = max(output_regL, min(abs_tL, abs_tU));
}
}
if (b < B && write_co < CO_div_G) {
output_lower[((b * G + blockIdx.z) * CO_div_G + write_co) * HW + hw] = output_regL;
output_upper[((b * G + blockIdx.z) * CO_div_G + write_co) * HW + hw] = output_regU;
}
}
template <bool conv>
__global__ void inf_dist_backward_input_kernel(const float* __restrict__ grad_output, const int* __restrict__ pos,
int B, int CO_div_G, int CI_div_G, int HW, int G, float* __restrict__ grad_input) {
int b_hw = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.x : threadIdx.y);
int b = b_hw / HW;
int hw = b_hw % HW;
int co = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
if (b < B && co < CO_div_G) {
int pos_reg = pos[((b * G + blockIdx.z) * CO_div_G + co) * HW + hw];
float grad = grad_output[((b * G + blockIdx.z) * CO_div_G + co) * HW + hw];
int index = pos_reg & (~(1 << 31));
atomicAdd(&grad_input[((b * G + blockIdx.z) * CI_div_G + index) * HW + hw], pos_reg >= 0 ? grad : -grad);
}
}
template <bool conv>
__global__ void inf_dist_backward_weight_kernel(const float* __restrict__ grad_output, const int* __restrict__ pos,
int B, int CO_div_G, int CI_div_G, int HW, int G, float* __restrict__ grad_weight) {
int b_hw = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.x : threadIdx.y);
int b = b_hw / HW;
int hw = b_hw % HW;
int co = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
if (b < B && co < CO_div_G) {
int pos_reg = pos[((b * G + blockIdx.z) * CO_div_G + co) * HW + hw];
float grad = grad_output[((b * G + blockIdx.z) * CO_div_G + co) * HW + hw];
int index = pos_reg & (~(1 << 31));
atomicAdd(&grad_weight[(blockIdx.z * CO_div_G + co) * CI_div_G + index], pos_reg < 0 ? grad : -grad);
}
}
void inf_dist_forward_cuda(const float* input, const float* weight,
int B, int CO, int CI, int G, int HW, float* output, int* pos) {
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid2((CO / G + BLOCK_SIZE - 1) / BLOCK_SIZE, (B * HW + BLOCK_SIZE - 1) / BLOCK_SIZE, G);
if (HW == 1) inf_dist_forward_kernel<false><<<dimGrid2, dimBlock>>>(input, weight, B, CO / G, CI / G, HW, G, output, pos);
else inf_dist_forward_kernel<true><<<dimGrid2, dimBlock>>>(input, weight, B, CO / G, CI / G, HW, G, output, pos);
}
void inf_dist_bound_forward_cuda(const float* input_lower, const float* input_upper, const float* weight,
int B, int CO, int CI, int G, int HW, float* output_lower, float* output_upper) {
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid2((CO / G + BLOCK_SIZE - 1) / BLOCK_SIZE, (B * HW + BLOCK_SIZE - 1) / BLOCK_SIZE, G);
if (HW == 1) inf_dist_bound_forward_kernel<false><<<dimGrid2, dimBlock>>>(input_lower, input_upper, weight, B, CO / G, CI / G, HW, G, output_lower, output_upper);
else inf_dist_bound_forward_kernel<true><<<dimGrid2, dimBlock>>>(input_lower, input_upper, weight, B, CO / G, CI / G, HW, G, output_lower, output_upper);
}
// Host-side launcher for the inf-dist input-gradient kernel.
// Grid: x tiles the per-group output channels (CO/G), y tiles B*HW,
// z enumerates the G groups; blocks are BLOCK_SIZE x BLOCK_SIZE.
// HW == 1 selects the fully-connected (<false>) template instantiation,
// otherwise the convolutional (<true>) one.
void inf_dist_backward_input_cuda(const float* grad_output, const int* pos,
                                  int B, int CO, int CI, int G, int HW, float* grad_input) {
    const int co_g = CO / G;
    const int ci_g = CI / G;
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid((co_g + BLOCK_SIZE - 1) / BLOCK_SIZE,
              (B * HW + BLOCK_SIZE - 1) / BLOCK_SIZE,
              G);
    if (HW == 1)
        inf_dist_backward_input_kernel<false><<<grid, block>>>(grad_output, pos, B, co_g, ci_g, HW, G, grad_input);
    else
        inf_dist_backward_input_kernel<true><<<grid, block>>>(grad_output, pos, B, co_g, ci_g, HW, G, grad_input);
}
// Host-side launcher for the inf-dist weight-gradient kernel.
// Grid: x tiles the per-group output channels (CO/G), y tiles B*HW,
// z enumerates the G groups; blocks are BLOCK_SIZE x BLOCK_SIZE.
// HW == 1 selects the fully-connected (<false>) template instantiation,
// otherwise the convolutional (<true>) one.
// NOTE(review): no cudaGetLastError() after the launch — errors surface only
// at the caller's next synchronizing CUDA call.
void inf_dist_backward_weight_cuda(const float* grad_output, const int* pos,
                                   int B, int CO, int CI, int G, int HW, float* grad_weight) {
    const int co_g = CO / G;
    const int ci_g = CI / G;
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid((co_g + BLOCK_SIZE - 1) / BLOCK_SIZE,
              (B * HW + BLOCK_SIZE - 1) / BLOCK_SIZE,
              G);
    if (HW == 1)
        inf_dist_backward_weight_kernel<false><<<grid, block>>>(grad_output, pos, B, co_g, ci_g, HW, G, grad_weight);
    else
        inf_dist_backward_weight_kernel<true><<<grid, block>>>(grad_output, pos, B, co_g, ci_g, HW, G, grad_weight);
}
|
.file "tmpxft_001a68a3_00000000-6_inf_dist_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL63__device_stub__Z23inf_dist_forward_kernelILb0EEvPKfS1_iiiiiPfPiPKfS0_iiiiiPfPi, @function
_ZL63__device_stub__Z23inf_dist_forward_kernelILb0EEvPKfS1_iiiiiPfPiPKfS0_iiiiiPfPi:
.LFB2093:
.cfi_startproc
subq $200, %rsp
.cfi_def_cfa_offset 208
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
movq %rdi, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsi, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 4(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
movq 216(%rsp), %rax
movq %rax, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 168(%rsp)
movq 224(%rsp), %rax
movq %rax, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 176(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L5
.L1:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L5:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z23inf_dist_forward_kernelILb0EEvPKfS1_iiiiiPfPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L1
.L6:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2093:
.size _ZL63__device_stub__Z23inf_dist_forward_kernelILb0EEvPKfS1_iiiiiPfPiPKfS0_iiiiiPfPi, .-_ZL63__device_stub__Z23inf_dist_forward_kernelILb0EEvPKfS1_iiiiiPfPiPKfS0_iiiiiPfPi
.section .text._Z23inf_dist_forward_kernelILb0EEvPKfS1_iiiiiPfPi,"axG",@progbits,_Z23inf_dist_forward_kernelILb0EEvPKfS1_iiiiiPfPi,comdat
.weak _Z23inf_dist_forward_kernelILb0EEvPKfS1_iiiiiPfPi
.type _Z23inf_dist_forward_kernelILb0EEvPKfS1_iiiiiPfPi, @function
_Z23inf_dist_forward_kernelILb0EEvPKfS1_iiiiiPfPi:
.LFB2156:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 40(%rsp)
.cfi_def_cfa_offset 32
pushq 40(%rsp)
.cfi_def_cfa_offset 40
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
call _ZL63__device_stub__Z23inf_dist_forward_kernelILb0EEvPKfS1_iiiiiPfPiPKfS0_iiiiiPfPi
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2156:
.size _Z23inf_dist_forward_kernelILb0EEvPKfS1_iiiiiPfPi, .-_Z23inf_dist_forward_kernelILb0EEvPKfS1_iiiiiPfPi
.text
.type _ZL63__device_stub__Z23inf_dist_forward_kernelILb1EEvPKfS1_iiiiiPfPiPKfS0_iiiiiPfPi, @function
_ZL63__device_stub__Z23inf_dist_forward_kernelILb1EEvPKfS1_iiiiiPfPiPKfS0_iiiiiPfPi:
.LFB2095:
.cfi_startproc
subq $200, %rsp
.cfi_def_cfa_offset 208
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
movq %rdi, 16(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsi, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 4(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
movq 216(%rsp), %rax
movq %rax, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 168(%rsp)
movq 224(%rsp), %rax
movq %rax, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 176(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L13
.L9:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z23inf_dist_forward_kernelILb1EEvPKfS1_iiiiiPfPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L9
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2095:
.size _ZL63__device_stub__Z23inf_dist_forward_kernelILb1EEvPKfS1_iiiiiPfPiPKfS0_iiiiiPfPi, .-_ZL63__device_stub__Z23inf_dist_forward_kernelILb1EEvPKfS1_iiiiiPfPiPKfS0_iiiiiPfPi
.section .text._Z23inf_dist_forward_kernelILb1EEvPKfS1_iiiiiPfPi,"axG",@progbits,_Z23inf_dist_forward_kernelILb1EEvPKfS1_iiiiiPfPi,comdat
.weak _Z23inf_dist_forward_kernelILb1EEvPKfS1_iiiiiPfPi
.type _Z23inf_dist_forward_kernelILb1EEvPKfS1_iiiiiPfPi, @function
_Z23inf_dist_forward_kernelILb1EEvPKfS1_iiiiiPfPi:
.LFB2157:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 40(%rsp)
.cfi_def_cfa_offset 32
pushq 40(%rsp)
.cfi_def_cfa_offset 40
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
call _ZL63__device_stub__Z23inf_dist_forward_kernelILb1EEvPKfS1_iiiiiPfPiPKfS0_iiiiiPfPi
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2157:
.size _Z23inf_dist_forward_kernelILb1EEvPKfS1_iiiiiPfPi, .-_Z23inf_dist_forward_kernelILb1EEvPKfS1_iiiiiPfPi
.text
.type _ZL73__device_stub__Z29inf_dist_bound_forward_kernelILb0EEvPKfS1_S1_iiiiiPfS2_PKfS0_S0_iiiiiPfS1_, @function
_ZL73__device_stub__Z29inf_dist_bound_forward_kernelILb0EEvPKfS1_S1_iiiiiPfS2_PKfS0_S0_iiiiiPfS1_:
.LFB2097:
.cfi_startproc
subq $232, %rsp
.cfi_def_cfa_offset 240
movl %ecx, 12(%rsp)
movl %r8d, 8(%rsp)
movl %r9d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
movq %rdi, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsi, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
movq %rdx, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 8(%rsp), %rax
movq %rax, 160(%rsp)
leaq 4(%rsp), %rax
movq %rax, 168(%rsp)
leaq 240(%rsp), %rax
movq %rax, 176(%rsp)
leaq 248(%rsp), %rax
movq %rax, 184(%rsp)
movq 256(%rsp), %rax
movq %rax, 48(%rsp)
leaq 48(%rsp), %rax
movq %rax, 192(%rsp)
movq 264(%rsp), %rax
movq %rax, 56(%rsp)
leaq 56(%rsp), %rax
movq %rax, 200(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L21
.L17:
movq 216(%rsp), %rax
subq %fs:40, %rax
jne .L22
addq $232, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 248
pushq 72(%rsp)
.cfi_def_cfa_offset 256
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z29inf_dist_bound_forward_kernelILb0EEvPKfS1_S1_iiiiiPfS2_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 240
jmp .L17
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2097:
.size _ZL73__device_stub__Z29inf_dist_bound_forward_kernelILb0EEvPKfS1_S1_iiiiiPfS2_PKfS0_S0_iiiiiPfS1_, .-_ZL73__device_stub__Z29inf_dist_bound_forward_kernelILb0EEvPKfS1_S1_iiiiiPfS2_PKfS0_S0_iiiiiPfS1_
.section .text._Z29inf_dist_bound_forward_kernelILb0EEvPKfS1_S1_iiiiiPfS2_,"axG",@progbits,_Z29inf_dist_bound_forward_kernelILb0EEvPKfS1_S1_iiiiiPfS2_,comdat
.weak _Z29inf_dist_bound_forward_kernelILb0EEvPKfS1_S1_iiiiiPfS2_
.type _Z29inf_dist_bound_forward_kernelILb0EEvPKfS1_S1_iiiiiPfS2_, @function
_Z29inf_dist_bound_forward_kernelILb0EEvPKfS1_S1_iiiiiPfS2_:
.LFB2158:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 40(%rsp)
.cfi_def_cfa_offset 24
pushq 40(%rsp)
.cfi_def_cfa_offset 32
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
call _ZL73__device_stub__Z29inf_dist_bound_forward_kernelILb0EEvPKfS1_S1_iiiiiPfS2_PKfS0_S0_iiiiiPfS1_
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2158:
.size _Z29inf_dist_bound_forward_kernelILb0EEvPKfS1_S1_iiiiiPfS2_, .-_Z29inf_dist_bound_forward_kernelILb0EEvPKfS1_S1_iiiiiPfS2_
.text
.type _ZL73__device_stub__Z29inf_dist_bound_forward_kernelILb1EEvPKfS1_S1_iiiiiPfS2_PKfS0_S0_iiiiiPfS1_, @function
_ZL73__device_stub__Z29inf_dist_bound_forward_kernelILb1EEvPKfS1_S1_iiiiiPfS2_PKfS0_S0_iiiiiPfS1_:
.LFB2099:
.cfi_startproc
subq $232, %rsp
.cfi_def_cfa_offset 240
movl %ecx, 12(%rsp)
movl %r8d, 8(%rsp)
movl %r9d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
movq %rdi, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsi, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
movq %rdx, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 8(%rsp), %rax
movq %rax, 160(%rsp)
leaq 4(%rsp), %rax
movq %rax, 168(%rsp)
leaq 240(%rsp), %rax
movq %rax, 176(%rsp)
leaq 248(%rsp), %rax
movq %rax, 184(%rsp)
movq 256(%rsp), %rax
movq %rax, 48(%rsp)
leaq 48(%rsp), %rax
movq %rax, 192(%rsp)
movq 264(%rsp), %rax
movq %rax, 56(%rsp)
leaq 56(%rsp), %rax
movq %rax, 200(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L29
.L25:
movq 216(%rsp), %rax
subq %fs:40, %rax
jne .L30
addq $232, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 248
pushq 72(%rsp)
.cfi_def_cfa_offset 256
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z29inf_dist_bound_forward_kernelILb1EEvPKfS1_S1_iiiiiPfS2_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 240
jmp .L25
.L30:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2099:
.size _ZL73__device_stub__Z29inf_dist_bound_forward_kernelILb1EEvPKfS1_S1_iiiiiPfS2_PKfS0_S0_iiiiiPfS1_, .-_ZL73__device_stub__Z29inf_dist_bound_forward_kernelILb1EEvPKfS1_S1_iiiiiPfS2_PKfS0_S0_iiiiiPfS1_
.section .text._Z29inf_dist_bound_forward_kernelILb1EEvPKfS1_S1_iiiiiPfS2_,"axG",@progbits,_Z29inf_dist_bound_forward_kernelILb1EEvPKfS1_S1_iiiiiPfS2_,comdat
.weak _Z29inf_dist_bound_forward_kernelILb1EEvPKfS1_S1_iiiiiPfS2_
.type _Z29inf_dist_bound_forward_kernelILb1EEvPKfS1_S1_iiiiiPfS2_, @function
_Z29inf_dist_bound_forward_kernelILb1EEvPKfS1_S1_iiiiiPfS2_:
.LFB2159:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 40(%rsp)
.cfi_def_cfa_offset 24
pushq 40(%rsp)
.cfi_def_cfa_offset 32
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
call _ZL73__device_stub__Z29inf_dist_bound_forward_kernelILb1EEvPKfS1_S1_iiiiiPfS2_PKfS0_S0_iiiiiPfS1_
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2159:
.size _Z29inf_dist_bound_forward_kernelILb1EEvPKfS1_S1_iiiiiPfS2_, .-_Z29inf_dist_bound_forward_kernelILb1EEvPKfS1_S1_iiiiiPfS2_
.text
.type _ZL68__device_stub__Z30inf_dist_backward_input_kernelILb0EEvPKfPKiiiiiiPfPKfPKiiiiiiPf, @function
_ZL68__device_stub__Z30inf_dist_backward_input_kernelILb0EEvPKfPKiiiiiiPfPKfPKiiiiiiPf:
.LFB2101:
.cfi_startproc
subq $200, %rsp
.cfi_def_cfa_offset 208
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
movq %rdi, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsi, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 4(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
movq 216(%rsp), %rax
movq %rax, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L37
.L33:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L38
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L37:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z30inf_dist_backward_input_kernelILb0EEvPKfPKiiiiiiPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L33
.L38:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2101:
.size _ZL68__device_stub__Z30inf_dist_backward_input_kernelILb0EEvPKfPKiiiiiiPfPKfPKiiiiiiPf, .-_ZL68__device_stub__Z30inf_dist_backward_input_kernelILb0EEvPKfPKiiiiiiPfPKfPKiiiiiiPf
.section .text._Z30inf_dist_backward_input_kernelILb0EEvPKfPKiiiiiiPf,"axG",@progbits,_Z30inf_dist_backward_input_kernelILb0EEvPKfPKiiiiiiPf,comdat
.weak _Z30inf_dist_backward_input_kernelILb0EEvPKfPKiiiiiiPf
.type _Z30inf_dist_backward_input_kernelILb0EEvPKfPKiiiiiiPf, @function
_Z30inf_dist_backward_input_kernelILb0EEvPKfPKiiiiiiPf:
.LFB2160:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 24(%rsp)
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _ZL68__device_stub__Z30inf_dist_backward_input_kernelILb0EEvPKfPKiiiiiiPfPKfPKiiiiiiPf
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2160:
.size _Z30inf_dist_backward_input_kernelILb0EEvPKfPKiiiiiiPf, .-_Z30inf_dist_backward_input_kernelILb0EEvPKfPKiiiiiiPf
.text
.type _ZL68__device_stub__Z30inf_dist_backward_input_kernelILb1EEvPKfPKiiiiiiPfPKfPKiiiiiiPf, @function
_ZL68__device_stub__Z30inf_dist_backward_input_kernelILb1EEvPKfPKiiiiiiPfPKfPKiiiiiiPf:
.LFB2103:
.cfi_startproc
subq $200, %rsp
.cfi_def_cfa_offset 208
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
movq %rdi, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsi, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 4(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
movq 216(%rsp), %rax
movq %rax, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L45
.L41:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L46
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L45:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z30inf_dist_backward_input_kernelILb1EEvPKfPKiiiiiiPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L41
.L46:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2103:
.size _ZL68__device_stub__Z30inf_dist_backward_input_kernelILb1EEvPKfPKiiiiiiPfPKfPKiiiiiiPf, .-_ZL68__device_stub__Z30inf_dist_backward_input_kernelILb1EEvPKfPKiiiiiiPfPKfPKiiiiiiPf
.section .text._Z30inf_dist_backward_input_kernelILb1EEvPKfPKiiiiiiPf,"axG",@progbits,_Z30inf_dist_backward_input_kernelILb1EEvPKfPKiiiiiiPf,comdat
.weak _Z30inf_dist_backward_input_kernelILb1EEvPKfPKiiiiiiPf
.type _Z30inf_dist_backward_input_kernelILb1EEvPKfPKiiiiiiPf, @function
_Z30inf_dist_backward_input_kernelILb1EEvPKfPKiiiiiiPf:
.LFB2161:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 24(%rsp)
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _ZL68__device_stub__Z30inf_dist_backward_input_kernelILb1EEvPKfPKiiiiiiPfPKfPKiiiiiiPf
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2161:
.size _Z30inf_dist_backward_input_kernelILb1EEvPKfPKiiiiiiPf, .-_Z30inf_dist_backward_input_kernelILb1EEvPKfPKiiiiiiPf
.text
.type _ZL69__device_stub__Z31inf_dist_backward_weight_kernelILb0EEvPKfPKiiiiiiPfPKfPKiiiiiiPf, @function
_ZL69__device_stub__Z31inf_dist_backward_weight_kernelILb0EEvPKfPKiiiiiiPfPKfPKiiiiiiPf:
.LFB2105:
.cfi_startproc
subq $200, %rsp
.cfi_def_cfa_offset 208
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
movq %rdi, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsi, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 4(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
movq 216(%rsp), %rax
movq %rax, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L53
.L49:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L54
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L53:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z31inf_dist_backward_weight_kernelILb0EEvPKfPKiiiiiiPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L49
.L54:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2105:
.size _ZL69__device_stub__Z31inf_dist_backward_weight_kernelILb0EEvPKfPKiiiiiiPfPKfPKiiiiiiPf, .-_ZL69__device_stub__Z31inf_dist_backward_weight_kernelILb0EEvPKfPKiiiiiiPfPKfPKiiiiiiPf
.section .text._Z31inf_dist_backward_weight_kernelILb0EEvPKfPKiiiiiiPf,"axG",@progbits,_Z31inf_dist_backward_weight_kernelILb0EEvPKfPKiiiiiiPf,comdat
.weak _Z31inf_dist_backward_weight_kernelILb0EEvPKfPKiiiiiiPf
.type _Z31inf_dist_backward_weight_kernelILb0EEvPKfPKiiiiiiPf, @function
_Z31inf_dist_backward_weight_kernelILb0EEvPKfPKiiiiiiPf:
.LFB2162:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 24(%rsp)
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _ZL69__device_stub__Z31inf_dist_backward_weight_kernelILb0EEvPKfPKiiiiiiPfPKfPKiiiiiiPf
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2162:
.size _Z31inf_dist_backward_weight_kernelILb0EEvPKfPKiiiiiiPf, .-_Z31inf_dist_backward_weight_kernelILb0EEvPKfPKiiiiiiPf
.text
.type _ZL69__device_stub__Z31inf_dist_backward_weight_kernelILb1EEvPKfPKiiiiiiPfPKfPKiiiiiiPf, @function
_ZL69__device_stub__Z31inf_dist_backward_weight_kernelILb1EEvPKfPKiiiiiiPfPKfPKiiiiiiPf:
.LFB2107:
.cfi_startproc
subq $200, %rsp
.cfi_def_cfa_offset 208
movl %edx, 12(%rsp)
movl %ecx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
movq %rdi, 24(%rsp)
leaq 24(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsi, 32(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 4(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
movq 216(%rsp), %rax
movq %rax, 40(%rsp)
leaq 40(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L61
.L57:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L62
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L61:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z31inf_dist_backward_weight_kernelILb1EEvPKfPKiiiiiiPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L57
.L62:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2107:
.size _ZL69__device_stub__Z31inf_dist_backward_weight_kernelILb1EEvPKfPKiiiiiiPfPKfPKiiiiiiPf, .-_ZL69__device_stub__Z31inf_dist_backward_weight_kernelILb1EEvPKfPKiiiiiiPfPKfPKiiiiiiPf
.section .text._Z31inf_dist_backward_weight_kernelILb1EEvPKfPKiiiiiiPf,"axG",@progbits,_Z31inf_dist_backward_weight_kernelILb1EEvPKfPKiiiiiiPf,comdat
.weak _Z31inf_dist_backward_weight_kernelILb1EEvPKfPKiiiiiiPf
.type _Z31inf_dist_backward_weight_kernelILb1EEvPKfPKiiiiiiPf, @function
_Z31inf_dist_backward_weight_kernelILb1EEvPKfPKiiiiiiPf:
.LFB2163:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 24(%rsp)
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _ZL69__device_stub__Z31inf_dist_backward_weight_kernelILb1EEvPKfPKiiiiiiPfPKfPKiiiiiiPf
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2163:
.size _Z31inf_dist_backward_weight_kernelILb1EEvPKfPKiiiiiiPf, .-_Z31inf_dist_backward_weight_kernelILb1EEvPKfPKiiiiiiPf
.text
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2071:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2071:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z21inf_dist_forward_cudaPKfS0_iiiiiPfPi
.type _Z21inf_dist_forward_cudaPKfS0_iiiiiPfPi, @function
_Z21inf_dist_forward_cudaPKfS0_iiiiiPfPi:
.LFB2065:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %rsi, %r14
movl %edx, %r12d
movl %r8d, %r13d
movl %r9d, %ebp
movl 112(%rsp), %r15d
movl $16, 24(%rsp)
movl $16, 28(%rsp)
movl $1, 32(%rsp)
movl %ecx, %eax
cltd
idivl %r9d
movl %eax, %ebx
leal 30(%rax), %eax
movl %ebx, %edx
addl $15, %edx
cmovns %edx, %eax
sarl $4, %eax
movl %eax, 36(%rsp)
movl %r12d, %edx
imull %r15d, %edx
leal 30(%rdx), %eax
addl $15, %edx
cmovns %edx, %eax
sarl $4, %eax
movl %eax, 40(%rsp)
movl %r9d, 44(%rsp)
cmpl $1, %r15d
je .L71
movl 32(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movq 36(%rsp), %rdi
movl 44(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L72
.L67:
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L71:
.cfi_restore_state
movl 32(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movq 36(%rsp), %rdi
movl %ebp, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L67
movl %r13d, %eax
cltd
idivl %ebp
movl %eax, %r8d
subq $8, %rsp
.cfi_def_cfa_offset 120
pushq 136(%rsp)
.cfi_def_cfa_offset 128
pushq 136(%rsp)
.cfi_def_cfa_offset 136
pushq %rbp
.cfi_def_cfa_offset 144
movl $1, %r9d
movl %ebx, %ecx
movl %r12d, %edx
movq %r14, %rsi
movq 40(%rsp), %rdi
call _ZL63__device_stub__Z23inf_dist_forward_kernelILb0EEvPKfS1_iiiiiPfPiPKfS0_iiiiiPfPi
addq $32, %rsp
.cfi_def_cfa_offset 112
jmp .L67
.L72:
movl %r13d, %eax
cltd
idivl %ebp
movl %eax, %r8d
subq $8, %rsp
.cfi_def_cfa_offset 120
pushq 136(%rsp)
.cfi_def_cfa_offset 128
pushq 136(%rsp)
.cfi_def_cfa_offset 136
pushq %rbp
.cfi_def_cfa_offset 144
movl %r15d, %r9d
movl %ebx, %ecx
movl %r12d, %edx
movq %r14, %rsi
movq 40(%rsp), %rdi
call _ZL63__device_stub__Z23inf_dist_forward_kernelILb1EEvPKfS1_iiiiiPfPiPKfS0_iiiiiPfPi
addq $32, %rsp
.cfi_def_cfa_offset 112
jmp .L67
.cfi_endproc
.LFE2065:
.size _Z21inf_dist_forward_cudaPKfS0_iiiiiPfPi, .-_Z21inf_dist_forward_cudaPKfS0_iiiiiPfPi
.globl _Z27inf_dist_bound_forward_cudaPKfS0_S0_iiiiiPfS1_
.type _Z27inf_dist_bound_forward_cudaPKfS0_S0_iiiiiPfS1_, @function
_Z27inf_dist_bound_forward_cudaPKfS0_S0_iiiiiPfS1_:
.LFB2066:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %rdi, (%rsp)
movq %rsi, 8(%rsp)
movq %rdx, %r14
movl %ecx, %r12d
movl %r9d, %r13d
movl 112(%rsp), %ebp
movl 120(%rsp), %r15d
movl $16, 24(%rsp)
movl $16, 28(%rsp)
movl $1, 32(%rsp)
movl %r8d, %eax
cltd
idivl %ebp
movl %eax, %ebx
leal 30(%rax), %eax
movl %ebx, %edx
addl $15, %edx
cmovns %edx, %eax
sarl $4, %eax
movl %eax, 36(%rsp)
movl %ecx, %edx
imull %r15d, %edx
leal 30(%rdx), %eax
addl $15, %edx
cmovns %edx, %eax
sarl $4, %eax
movl %eax, 40(%rsp)
movl %ebp, 44(%rsp)
cmpl $1, %r15d
je .L77
movl 32(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movq 36(%rsp), %rdi
movl 44(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L78
.L73:
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L77:
.cfi_restore_state
movl 32(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movq 36(%rsp), %rdi
movl %ebp, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L73
movl %r13d, %eax
cltd
idivl %ebp
movl %eax, %r9d
pushq 136(%rsp)
.cfi_def_cfa_offset 120
pushq 136(%rsp)
.cfi_def_cfa_offset 128
pushq %rbp
.cfi_def_cfa_offset 136
pushq $1
.cfi_def_cfa_offset 144
movl %ebx, %r8d
movl %r12d, %ecx
movq %r14, %rdx
movq 40(%rsp), %rsi
movq 32(%rsp), %rdi
call _ZL73__device_stub__Z29inf_dist_bound_forward_kernelILb0EEvPKfS1_S1_iiiiiPfS2_PKfS0_S0_iiiiiPfS1_
addq $32, %rsp
.cfi_def_cfa_offset 112
jmp .L73
.L78:
movl %r13d, %eax
cltd
idivl %ebp
movl %eax, %r9d
pushq 136(%rsp)
.cfi_def_cfa_offset 120
pushq 136(%rsp)
.cfi_def_cfa_offset 128
pushq %rbp
.cfi_def_cfa_offset 136
pushq %r15
.cfi_def_cfa_offset 144
movl %ebx, %r8d
movl %r12d, %ecx
movq %r14, %rdx
movq 40(%rsp), %rsi
movq 32(%rsp), %rdi
call _ZL73__device_stub__Z29inf_dist_bound_forward_kernelILb1EEvPKfS1_S1_iiiiiPfS2_PKfS0_S0_iiiiiPfS1_
addq $32, %rsp
.cfi_def_cfa_offset 112
jmp .L73
.cfi_endproc
.LFE2066:
.size _Z27inf_dist_bound_forward_cudaPKfS0_S0_iiiiiPfS1_, .-_Z27inf_dist_bound_forward_cudaPKfS0_S0_iiiiiPfS1_
.globl _Z28inf_dist_backward_input_cudaPKfPKiiiiiiPf
.type _Z28inf_dist_backward_input_cudaPKfPKiiiiiiPf, @function
_Z28inf_dist_backward_input_cudaPKfPKiiiiiiPf:
.LFB2067:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %rsi, %r14
movl %edx, %r12d
movl %r8d, %r13d
movl %r9d, %ebp
movl 112(%rsp), %r15d
movl $16, 24(%rsp)
movl $16, 28(%rsp)
movl $1, 32(%rsp)
movl %ecx, %eax
cltd
idivl %r9d
movl %eax, %ebx
leal 30(%rax), %eax
movl %ebx, %edx
addl $15, %edx
cmovns %edx, %eax
sarl $4, %eax
movl %eax, 36(%rsp)
movl %r12d, %edx
imull %r15d, %edx
leal 30(%rdx), %eax
addl $15, %edx
cmovns %edx, %eax
sarl $4, %eax
movl %eax, 40(%rsp)
movl %r9d, 44(%rsp)
cmpl $1, %r15d
je .L83
movl 32(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movq 36(%rsp), %rdi
movl 44(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L84
.L79:
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L83:
.cfi_restore_state
movl 32(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movq 36(%rsp), %rdi
movl %ebp, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L79
movl %r13d, %eax
cltd
idivl %ebp
movl %eax, %r8d
pushq 120(%rsp)
.cfi_def_cfa_offset 120
pushq %rbp
.cfi_def_cfa_offset 128
movl $1, %r9d
movl %ebx, %ecx
movl %r12d, %edx
movq %r14, %rsi
movq 24(%rsp), %rdi
call _ZL68__device_stub__Z30inf_dist_backward_input_kernelILb0EEvPKfPKiiiiiiPfPKfPKiiiiiiPf
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L79
.L84:
movl %r13d, %eax
cltd
idivl %ebp
movl %eax, %r8d
pushq 120(%rsp)
.cfi_def_cfa_offset 120
pushq %rbp
.cfi_def_cfa_offset 128
movl %r15d, %r9d
movl %ebx, %ecx
movl %r12d, %edx
movq %r14, %rsi
movq 24(%rsp), %rdi
call _ZL68__device_stub__Z30inf_dist_backward_input_kernelILb1EEvPKfPKiiiiiiPfPKfPKiiiiiiPf
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L79
.cfi_endproc
.LFE2067:
.size _Z28inf_dist_backward_input_cudaPKfPKiiiiiiPf, .-_Z28inf_dist_backward_input_cudaPKfPKiiiiiiPf
.globl _Z29inf_dist_backward_weight_cudaPKfPKiiiiiiPf
.type _Z29inf_dist_backward_weight_cudaPKfPKiiiiiiPf, @function
_Z29inf_dist_backward_weight_cudaPKfPKiiiiiiPf:
.LFB2068:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %rsi, %r14
movl %edx, %r12d
movl %r8d, %r13d
movl %r9d, %ebp
movl 112(%rsp), %r15d
movl $16, 24(%rsp)
movl $16, 28(%rsp)
movl $1, 32(%rsp)
movl %ecx, %eax
cltd
idivl %r9d
movl %eax, %ebx
leal 30(%rax), %eax
movl %ebx, %edx
addl $15, %edx
cmovns %edx, %eax
sarl $4, %eax
movl %eax, 36(%rsp)
movl %r12d, %edx
imull %r15d, %edx
leal 30(%rdx), %eax
addl $15, %edx
cmovns %edx, %eax
sarl $4, %eax
movl %eax, 40(%rsp)
movl %r9d, 44(%rsp)
cmpl $1, %r15d
je .L89
movl 32(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movq 36(%rsp), %rdi
movl 44(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L90
.L85:
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L89:
.cfi_restore_state
movl 32(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movq 36(%rsp), %rdi
movl %ebp, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L85
movl %r13d, %eax
cltd
idivl %ebp
movl %eax, %r8d
pushq 120(%rsp)
.cfi_def_cfa_offset 120
pushq %rbp
.cfi_def_cfa_offset 128
movl $1, %r9d
movl %ebx, %ecx
movl %r12d, %edx
movq %r14, %rsi
movq 24(%rsp), %rdi
call _ZL69__device_stub__Z31inf_dist_backward_weight_kernelILb0EEvPKfPKiiiiiiPfPKfPKiiiiiiPf
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L85
.L90:
movl %r13d, %eax
cltd
idivl %ebp
movl %eax, %r8d
pushq 120(%rsp)
.cfi_def_cfa_offset 120
pushq %rbp
.cfi_def_cfa_offset 128
movl %r15d, %r9d
movl %ebx, %ecx
movl %r12d, %edx
movq %r14, %rsi
movq 24(%rsp), %rdi
call _ZL69__device_stub__Z31inf_dist_backward_weight_kernelILb1EEvPKfPKiiiiiiPfPKfPKiiiiiiPf
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L85
.cfi_endproc
.LFE2068:
.size _Z29inf_dist_backward_weight_cudaPKfPKiiiiiiPf, .-_Z29inf_dist_backward_weight_cudaPKfPKiiiiiiPf
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z31inf_dist_backward_weight_kernelILb1EEvPKfPKiiiiiiPf"
.align 8
.LC1:
.string "_Z31inf_dist_backward_weight_kernelILb0EEvPKfPKiiiiiiPf"
.align 8
.LC2:
.string "_Z30inf_dist_backward_input_kernelILb1EEvPKfPKiiiiiiPf"
.align 8
.LC3:
.string "_Z30inf_dist_backward_input_kernelILb0EEvPKfPKiiiiiiPf"
.align 8
.LC4:
.string "_Z29inf_dist_bound_forward_kernelILb1EEvPKfS1_S1_iiiiiPfS2_"
.align 8
.LC5:
.string "_Z29inf_dist_bound_forward_kernelILb0EEvPKfS1_S1_iiiiiPfS2_"
.align 8
.LC6:
.string "_Z23inf_dist_forward_kernelILb1EEvPKfS1_iiiiiPfPi"
.align 8
.LC7:
.string "_Z23inf_dist_forward_kernelILb0EEvPKfS1_iiiiiPfPi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2110:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z31inf_dist_backward_weight_kernelILb1EEvPKfPKiiiiiiPf(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z31inf_dist_backward_weight_kernelILb0EEvPKfPKiiiiiiPf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z30inf_dist_backward_input_kernelILb1EEvPKfPKiiiiiiPf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z30inf_dist_backward_input_kernelILb0EEvPKfPKiiiiiiPf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z29inf_dist_bound_forward_kernelILb1EEvPKfS1_S1_iiiiiPfS2_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z29inf_dist_bound_forward_kernelILb0EEvPKfS1_S1_iiiiiPfS2_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z23inf_dist_forward_kernelILb1EEvPKfS1_iiiiiPfPi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z23inf_dist_forward_kernelILb0EEvPKfS1_iiiiiPfPi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2110:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include <cassert>
#include <cstdio>
const int BLOCK_SIZE = 16;
#define getX (conv ? blockI[i][threadIdx.x] : blockI[threadIdx.y][i])
#define getW (conv ? blockW[i][threadIdx.y] : blockW[i][threadIdx.x])
// L_inf-distance layer forward pass.
// For every output element (b, write_co, hw) computes
//   output = max over ci of |input[b, g, ci, hw] - weight[g, write_co, ci]|
// and stores in `pos` the argmax input channel, with bit 31 set when the
// difference (input - weight) was negative there (the sign is needed by the
// backward kernels).
// Launch layout: blockDim = (BLOCK_SIZE, BLOCK_SIZE); gridDim.x tiles
// CO_div_G, gridDim.y tiles B*HW, gridDim.z = G (group index).
// conv selects the shared-tile orientation: true -> blockI is CI x B so that
// threadIdx.x walks b_hw (keeps global loads contiguous over HW);
// false -> B x CI (the HW == 1 fully-connected case).
template <bool conv>
__global__ void inf_dist_forward_kernel(const float* __restrict__ input, const float* __restrict__ weight,
                                        int B, int CO_div_G, int CI_div_G, int HW, int G,
                                        float* __restrict__ output, int* __restrict__ pos) {
    float output_reg = 0;  // running max of |x - w| for this output element
    int pos_reg = 0;       // packed argmax: channel index, sign flag in bit 31
    int b_hw = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.x : threadIdx.y);
    int b = b_hw / HW;
    int hw = b_hw % HW;
    int write_co = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
    int read_w_co = blockIdx.x * BLOCK_SIZE + threadIdx.y;
    __shared__ float blockI[BLOCK_SIZE][BLOCK_SIZE]; // CI * B if conv else B * CI
    __shared__ float blockW[BLOCK_SIZE][BLOCK_SIZE + 2]; // CI * CO; +2 pad presumably to avoid bank conflicts -- confirm
    int k;
    // Main loop over full BLOCK_SIZE-wide chunks of the input channels.
    for (k = 0; k < (CI_div_G & ~(BLOCK_SIZE - 1)); k += BLOCK_SIZE) {
        if (b < B) {
            if (conv) blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
            else blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
        }
        if (read_w_co < CO_div_G)
            blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + read_w_co) * CI_div_G + k + threadIdx.x];
        __syncthreads();  // tiles fully written before any thread reads them
        #pragma unroll
        for (int i = 0; i < BLOCK_SIZE; i++) {
            float t = getX - getW;
            float abs_t = abs(t);
            if (abs_t > output_reg) {
                output_reg = abs_t;
                // 1 << 31 marks that (x - w) was negative at the argmax.
                pos_reg = k + i + (t >= 0 ? 0 : 1 << 31);
            }
        }
        __syncthreads();  // protect tiles from being overwritten next chunk
    }
    // Remainder chunk when CI_div_G is not a multiple of BLOCK_SIZE.
    if (CI_div_G & (BLOCK_SIZE - 1)) {
        if (b < B) {
            if (conv && k + threadIdx.y < CI_div_G) blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
            if (!conv && k + threadIdx.x < CI_div_G) blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
        }
        if (k + threadIdx.x < CI_div_G && read_w_co < CO_div_G)
            blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + read_w_co) * CI_div_G + k + threadIdx.x];
        __syncthreads();
        #pragma unroll
        for (int i = 0; i < (CI_div_G & (BLOCK_SIZE - 1)); i++) {
            float t = getX - getW;
            float abs_t = abs(t);
            if (abs_t > output_reg) {
                output_reg = abs_t;
                pos_reg = k + i + (t >= 0 ? 0 : 1 << 31);
            }
        }
    }
    // Tail threads (b >= B or write_co out of range) only helped load tiles.
    if (b < B && write_co < CO_div_G) {
        output[((b * G + blockIdx.z) * CO_div_G + write_co) * HW + hw] = output_reg;
        pos[((b * G + blockIdx.z) * CO_div_G + write_co) * HW + hw] = pos_reg;
    }
}
#define getXL (conv ? blockIL[i][threadIdx.x] : blockIL[threadIdx.y][i])
#define getXU (conv ? blockIU[i][threadIdx.x] : blockIU[threadIdx.y][i])
// Interval-bound propagation through the L_inf-distance layer.
// Given elementwise input bounds [input_lower, input_upper], computes bounds
// on out = max_ci |x_ci - w_ci| for every (b, write_co, hw):
//   upper = max_ci max(|xL - w|, |xU - w|)
//   lower = max_ci min(|xL - w|, |xU - w|), skipping channels where w lies
//           strictly inside (xL, xU) -- there the channel distance can be 0,
//           so it cannot raise the lower bound.
// Same tiling and launch layout as inf_dist_forward_kernel.
template <bool conv>
__global__ void inf_dist_bound_forward_kernel(const float* __restrict__ input_lower, const float* __restrict__ input_upper,
                                              const float* __restrict__ weight,
                                              int B, int CO_div_G, int CI_div_G, int HW, int G,
                                              float* __restrict__ output_lower, float* __restrict__ output_upper) {
    float output_regL = 0, output_regU= 0;  // running lower / upper bounds
    int b_hw = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.x : threadIdx.y);
    int b = b_hw / HW;
    int hw = b_hw % HW;
    int write_co = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
    int read_w_co = blockIdx.x * BLOCK_SIZE + threadIdx.y;
    __shared__ float blockIL[BLOCK_SIZE][BLOCK_SIZE], blockIU[BLOCK_SIZE][BLOCK_SIZE]; // CI * B if conv else B * CI
    __shared__ float blockW[BLOCK_SIZE][BLOCK_SIZE + 2]; // CI * CO; +2 pad presumably to avoid bank conflicts -- confirm
    int k;
    // Main loop over full BLOCK_SIZE-wide chunks of the input channels.
    for (k = 0; k < (CI_div_G & ~(BLOCK_SIZE - 1)); k += BLOCK_SIZE) {
        if (b < B) {
            if (conv) {
                blockIL[threadIdx.y][threadIdx.x] = input_lower[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
                blockIU[threadIdx.y][threadIdx.x] = input_upper[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
            }
            else {
                blockIL[threadIdx.y][threadIdx.x] = input_lower[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
                blockIU[threadIdx.y][threadIdx.x] = input_upper[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
            }
        }
        if (read_w_co < CO_div_G)
            blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + read_w_co) * CI_div_G + k + threadIdx.x];
        __syncthreads();  // tiles fully written before any thread reads them
        #pragma unroll
        for (int i = 0; i < BLOCK_SIZE; i++) {
            float tL = getXL - getW, tU = getXU - getW;
            float abs_tL = abs(tL), abs_tU = abs(tU);
            output_regU = max(output_regU, max(abs_tL, abs_tU));
            // tL < 0 && tU > 0 means w is inside (xL, xU): distance may be 0.
            if (!(tL < 0 && tU > 0))
                output_regL = max(output_regL, min(abs_tL, abs_tU));
        }
        __syncthreads();  // protect tiles from being overwritten next chunk
    }
    // Remainder chunk when CI_div_G is not a multiple of BLOCK_SIZE.
    if (CI_div_G & (BLOCK_SIZE - 1)) {
        if (b < B) {
            if (conv && k + threadIdx.y < CI_div_G) {
                blockIL[threadIdx.y][threadIdx.x] = input_lower[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
                blockIU[threadIdx.y][threadIdx.x] = input_upper[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
            }
            if (!conv && k + threadIdx.x < CI_div_G) {
                blockIL[threadIdx.y][threadIdx.x] = input_lower[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
                blockIU[threadIdx.y][threadIdx.x] = input_upper[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
            }
        }
        if (k + threadIdx.x < CI_div_G && read_w_co < CO_div_G)
            blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + read_w_co) * CI_div_G + k + threadIdx.x];
        __syncthreads();
        #pragma unroll
        for (int i = 0; i < (CI_div_G & (BLOCK_SIZE - 1)); i++) {
            float tL = getXL - getW, tU = getXU - getW;
            float abs_tL = abs(tL), abs_tU = abs(tU);
            output_regU = max(output_regU, max(abs_tL, abs_tU));
            if (!(tL < 0 && tU > 0))
                output_regL = max(output_regL, min(abs_tL, abs_tU));
        }
    }
    if (b < B && write_co < CO_div_G) {
        output_lower[((b * G + blockIdx.z) * CO_div_G + write_co) * HW + hw] = output_regL;
        output_upper[((b * G + blockIdx.z) * CO_div_G + write_co) * HW + hw] = output_regU;
    }
}
// Backward pass w.r.t. the input: each thread owns one (b, co, hw) output
// element, decodes the argmax input channel recorded by the forward kernel
// (bit 31 of `pos` stores the sign of input - weight at the argmax), and
// atomically accumulates the signed gradient into that input slot.
template <bool conv>
__global__ void inf_dist_backward_input_kernel(const float* __restrict__ grad_output, const int* __restrict__ pos,
                                               int B, int CO_div_G, int CI_div_G, int HW, int G, float* __restrict__ grad_input) {
    const int flat = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.x : threadIdx.y);
    const int batch = flat / HW;
    const int spatial = flat % HW;
    const int chan = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
    if (batch >= B || chan >= CO_div_G) return;
    const int out_off = ((batch * G + blockIdx.z) * CO_div_G + chan) * HW + spatial;
    const int packed = pos[out_off];
    const float g = grad_output[out_off];
    const int src = packed & 0x7fffffff;  // clear the sign flag in bit 31
    atomicAdd(&grad_input[((batch * G + blockIdx.z) * CI_div_G + src) * HW + spatial],
              packed >= 0 ? g : -g);
}
// Backward pass w.r.t. the weights: mirrors the input-gradient kernel but
// scatters into grad_weight with the opposite sign (the derivative of
// |x - w| w.r.t. w is the negative of its derivative w.r.t. x).
template <bool conv>
__global__ void inf_dist_backward_weight_kernel(const float* __restrict__ grad_output, const int* __restrict__ pos,
                                                int B, int CO_div_G, int CI_div_G, int HW, int G, float* __restrict__ grad_weight) {
    const int flat = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.x : threadIdx.y);
    const int batch = flat / HW;
    const int spatial = flat % HW;
    const int chan = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
    if (batch >= B || chan >= CO_div_G) return;
    const int out_off = ((batch * G + blockIdx.z) * CO_div_G + chan) * HW + spatial;
    const int packed = pos[out_off];
    const float g = grad_output[out_off];
    const int src = packed & 0x7fffffff;  // clear the sign flag in bit 31
    atomicAdd(&grad_weight[(blockIdx.z * CO_div_G + chan) * CI_div_G + src],
              packed < 0 ? g : -g);
}
// Host launcher for the forward pass. HW == 1 selects the fully-connected
// specialization (conv = false); otherwise the convolutional tile layout is
// used. Grid: x tiles output channels per group, y tiles batch*spatial,
// z enumerates groups.
void inf_dist_forward_cuda(const float* input, const float* weight,
                           int B, int CO, int CI, int G, int HW, float* output, int* pos) {
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 grid((CO / G + BLOCK_SIZE - 1) / BLOCK_SIZE,
                    (B * HW + BLOCK_SIZE - 1) / BLOCK_SIZE, G);
    if (HW == 1)
        inf_dist_forward_kernel<false><<<grid, threads>>>(input, weight, B, CO / G, CI / G, HW, G, output, pos);
    else
        inf_dist_forward_kernel<true><<<grid, threads>>>(input, weight, B, CO / G, CI / G, HW, G, output, pos);
}
// Host launcher for interval-bound propagation; same dispatch rule and grid
// shape as inf_dist_forward_cuda.
void inf_dist_bound_forward_cuda(const float* input_lower, const float* input_upper, const float* weight,
                                 int B, int CO, int CI, int G, int HW, float* output_lower, float* output_upper) {
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 grid((CO / G + BLOCK_SIZE - 1) / BLOCK_SIZE,
                    (B * HW + BLOCK_SIZE - 1) / BLOCK_SIZE, G);
    if (HW == 1)
        inf_dist_bound_forward_kernel<false><<<grid, threads>>>(input_lower, input_upper, weight, B, CO / G, CI / G, HW, G, output_lower, output_upper);
    else
        inf_dist_bound_forward_kernel<true><<<grid, threads>>>(input_lower, input_upper, weight, B, CO / G, CI / G, HW, G, output_lower, output_upper);
}
// Host launcher for the input-gradient backward pass; caller must have
// zero-initialized grad_input since the kernel accumulates atomically.
void inf_dist_backward_input_cuda(const float* grad_output, const int* pos,
                                  int B, int CO, int CI, int G, int HW, float* grad_input) {
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 grid((CO / G + BLOCK_SIZE - 1) / BLOCK_SIZE,
                    (B * HW + BLOCK_SIZE - 1) / BLOCK_SIZE, G);
    if (HW == 1)
        inf_dist_backward_input_kernel<false><<<grid, threads>>>(grad_output, pos, B, CO / G, CI / G, HW, G, grad_input);
    else
        inf_dist_backward_input_kernel<true><<<grid, threads>>>(grad_output, pos, B, CO / G, CI / G, HW, G, grad_input);
}
// Host launcher for the weight-gradient backward pass; caller must have
// zero-initialized grad_weight since the kernel accumulates atomically.
void inf_dist_backward_weight_cuda(const float* grad_output, const int* pos,
                                   int B, int CO, int CI, int G, int HW, float* grad_weight) {
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 grid((CO / G + BLOCK_SIZE - 1) / BLOCK_SIZE,
                    (B * HW + BLOCK_SIZE - 1) / BLOCK_SIZE, G);
    if (HW == 1)
        inf_dist_backward_weight_kernel<false><<<grid, threads>>>(grad_output, pos, B, CO / G, CI / G, HW, G, grad_weight);
    else
        inf_dist_backward_weight_kernel<true><<<grid, threads>>>(grad_output, pos, B, CO / G, CI / G, HW, G, grad_weight);
}
|
#include <hip/hip_runtime.h>
#include <cassert>
#include <cstdio>
const int BLOCK_SIZE = 16;
#define getX (conv ? blockI[i][threadIdx.x] : blockI[threadIdx.y][i])
#define getW (conv ? blockW[i][threadIdx.y] : blockW[i][threadIdx.x])
// L_inf-distance layer forward pass (HIP build of the CUDA kernel).
// For every output element (b, write_co, hw) computes
//   output = max over ci of |input[b, g, ci, hw] - weight[g, write_co, ci]|
// and stores in `pos` the argmax input channel, with bit 31 set when the
// difference (input - weight) was negative there (the sign is needed by the
// backward kernels).
// Launch layout: blockDim = (BLOCK_SIZE, BLOCK_SIZE); gridDim.x tiles
// CO_div_G, gridDim.y tiles B*HW, gridDim.z = G (group index).
// conv selects the shared-tile orientation: true -> blockI is CI x B so that
// threadIdx.x walks b_hw (keeps global loads contiguous over HW);
// false -> B x CI (the HW == 1 fully-connected case).
template <bool conv>
__global__ void inf_dist_forward_kernel(const float* __restrict__ input, const float* __restrict__ weight,
                                        int B, int CO_div_G, int CI_div_G, int HW, int G,
                                        float* __restrict__ output, int* __restrict__ pos) {
    float output_reg = 0;  // running max of |x - w| for this output element
    int pos_reg = 0;       // packed argmax: channel index, sign flag in bit 31
    int b_hw = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.x : threadIdx.y);
    int b = b_hw / HW;
    int hw = b_hw % HW;
    int write_co = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
    int read_w_co = blockIdx.x * BLOCK_SIZE + threadIdx.y;
    __shared__ float blockI[BLOCK_SIZE][BLOCK_SIZE]; // CI * B if conv else B * CI
    __shared__ float blockW[BLOCK_SIZE][BLOCK_SIZE + 2]; // CI * CO; +2 pad presumably to avoid bank conflicts -- confirm
    int k;
    // Main loop over full BLOCK_SIZE-wide chunks of the input channels.
    for (k = 0; k < (CI_div_G & ~(BLOCK_SIZE - 1)); k += BLOCK_SIZE) {
        if (b < B) {
            if (conv) blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
            else blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
        }
        if (read_w_co < CO_div_G)
            blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + read_w_co) * CI_div_G + k + threadIdx.x];
        __syncthreads();  // tiles fully written before any thread reads them
        #pragma unroll
        for (int i = 0; i < BLOCK_SIZE; i++) {
            float t = getX - getW;
            float abs_t = abs(t);
            if (abs_t > output_reg) {
                output_reg = abs_t;
                // 1 << 31 marks that (x - w) was negative at the argmax.
                pos_reg = k + i + (t >= 0 ? 0 : 1 << 31);
            }
        }
        __syncthreads();  // protect tiles from being overwritten next chunk
    }
    // Remainder chunk when CI_div_G is not a multiple of BLOCK_SIZE.
    if (CI_div_G & (BLOCK_SIZE - 1)) {
        if (b < B) {
            if (conv && k + threadIdx.y < CI_div_G) blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
            if (!conv && k + threadIdx.x < CI_div_G) blockI[threadIdx.y][threadIdx.x] = input[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
        }
        if (k + threadIdx.x < CI_div_G && read_w_co < CO_div_G)
            blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + read_w_co) * CI_div_G + k + threadIdx.x];
        __syncthreads();
        #pragma unroll
        for (int i = 0; i < (CI_div_G & (BLOCK_SIZE - 1)); i++) {
            float t = getX - getW;
            float abs_t = abs(t);
            if (abs_t > output_reg) {
                output_reg = abs_t;
                pos_reg = k + i + (t >= 0 ? 0 : 1 << 31);
            }
        }
    }
    // Tail threads (b >= B or write_co out of range) only helped load tiles.
    if (b < B && write_co < CO_div_G) {
        output[((b * G + blockIdx.z) * CO_div_G + write_co) * HW + hw] = output_reg;
        pos[((b * G + blockIdx.z) * CO_div_G + write_co) * HW + hw] = pos_reg;
    }
}
#define getXL (conv ? blockIL[i][threadIdx.x] : blockIL[threadIdx.y][i])
#define getXU (conv ? blockIU[i][threadIdx.x] : blockIU[threadIdx.y][i])
// Interval-bound propagation through the L_inf-distance layer (HIP build).
// Given elementwise input bounds [input_lower, input_upper], computes bounds
// on out = max_ci |x_ci - w_ci| for every (b, write_co, hw):
//   upper = max_ci max(|xL - w|, |xU - w|)
//   lower = max_ci min(|xL - w|, |xU - w|), skipping channels where w lies
//           strictly inside (xL, xU) -- there the channel distance can be 0,
//           so it cannot raise the lower bound.
// Same tiling and launch layout as inf_dist_forward_kernel.
template <bool conv>
__global__ void inf_dist_bound_forward_kernel(const float* __restrict__ input_lower, const float* __restrict__ input_upper,
                                              const float* __restrict__ weight,
                                              int B, int CO_div_G, int CI_div_G, int HW, int G,
                                              float* __restrict__ output_lower, float* __restrict__ output_upper) {
    float output_regL = 0, output_regU= 0;  // running lower / upper bounds
    int b_hw = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.x : threadIdx.y);
    int b = b_hw / HW;
    int hw = b_hw % HW;
    int write_co = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
    int read_w_co = blockIdx.x * BLOCK_SIZE + threadIdx.y;
    __shared__ float blockIL[BLOCK_SIZE][BLOCK_SIZE], blockIU[BLOCK_SIZE][BLOCK_SIZE]; // CI * B if conv else B * CI
    __shared__ float blockW[BLOCK_SIZE][BLOCK_SIZE + 2]; // CI * CO; +2 pad presumably to avoid bank conflicts -- confirm
    int k;
    // Main loop over full BLOCK_SIZE-wide chunks of the input channels.
    for (k = 0; k < (CI_div_G & ~(BLOCK_SIZE - 1)); k += BLOCK_SIZE) {
        if (b < B) {
            if (conv) {
                blockIL[threadIdx.y][threadIdx.x] = input_lower[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
                blockIU[threadIdx.y][threadIdx.x] = input_upper[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
            }
            else {
                blockIL[threadIdx.y][threadIdx.x] = input_lower[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
                blockIU[threadIdx.y][threadIdx.x] = input_upper[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
            }
        }
        if (read_w_co < CO_div_G)
            blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + read_w_co) * CI_div_G + k + threadIdx.x];
        __syncthreads();  // tiles fully written before any thread reads them
        #pragma unroll
        for (int i = 0; i < BLOCK_SIZE; i++) {
            float tL = getXL - getW, tU = getXU - getW;
            float abs_tL = abs(tL), abs_tU = abs(tU);
            output_regU = max(output_regU, max(abs_tL, abs_tU));
            // tL < 0 && tU > 0 means w is inside (xL, xU): distance may be 0.
            if (!(tL < 0 && tU > 0))
                output_regL = max(output_regL, min(abs_tL, abs_tU));
        }
        __syncthreads();  // protect tiles from being overwritten next chunk
    }
    // Remainder chunk when CI_div_G is not a multiple of BLOCK_SIZE.
    if (CI_div_G & (BLOCK_SIZE - 1)) {
        if (b < B) {
            if (conv && k + threadIdx.y < CI_div_G) {
                blockIL[threadIdx.y][threadIdx.x] = input_lower[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
                blockIU[threadIdx.y][threadIdx.x] = input_upper[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.y) * HW + hw];
            }
            if (!conv && k + threadIdx.x < CI_div_G) {
                blockIL[threadIdx.y][threadIdx.x] = input_lower[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
                blockIU[threadIdx.y][threadIdx.x] = input_upper[((b * G + blockIdx.z) * CI_div_G + k + threadIdx.x) * HW + hw];
            }
        }
        if (k + threadIdx.x < CI_div_G && read_w_co < CO_div_G)
            blockW[threadIdx.x][threadIdx.y] = weight[(blockIdx.z * CO_div_G + read_w_co) * CI_div_G + k + threadIdx.x];
        __syncthreads();
        #pragma unroll
        for (int i = 0; i < (CI_div_G & (BLOCK_SIZE - 1)); i++) {
            float tL = getXL - getW, tU = getXU - getW;
            float abs_tL = abs(tL), abs_tU = abs(tU);
            output_regU = max(output_regU, max(abs_tL, abs_tU));
            if (!(tL < 0 && tU > 0))
                output_regL = max(output_regL, min(abs_tL, abs_tU));
        }
    }
    if (b < B && write_co < CO_div_G) {
        output_lower[((b * G + blockIdx.z) * CO_div_G + write_co) * HW + hw] = output_regL;
        output_upper[((b * G + blockIdx.z) * CO_div_G + write_co) * HW + hw] = output_regU;
    }
}
// Backward pass w.r.t. the input (HIP build): each thread owns one
// (b, co, hw) output element, decodes the argmax input channel recorded by
// the forward kernel (bit 31 of `pos` stores the sign of input - weight),
// and atomically accumulates the signed gradient into that input slot.
template <bool conv>
__global__ void inf_dist_backward_input_kernel(const float* __restrict__ grad_output, const int* __restrict__ pos,
                                               int B, int CO_div_G, int CI_div_G, int HW, int G, float* __restrict__ grad_input) {
    const int flat = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.x : threadIdx.y);
    const int batch = flat / HW;
    const int spatial = flat % HW;
    const int chan = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
    if (batch >= B || chan >= CO_div_G) return;
    const int out_off = ((batch * G + blockIdx.z) * CO_div_G + chan) * HW + spatial;
    const int packed = pos[out_off];
    const float g = grad_output[out_off];
    const int src = packed & 0x7fffffff;  // clear the sign flag in bit 31
    atomicAdd(&grad_input[((batch * G + blockIdx.z) * CI_div_G + src) * HW + spatial],
              packed >= 0 ? g : -g);
}
// Backward pass w.r.t. the weights (HIP build): mirrors the input-gradient
// kernel but scatters into grad_weight with the opposite sign (the
// derivative of |x - w| w.r.t. w is the negative of its derivative w.r.t. x).
template <bool conv>
__global__ void inf_dist_backward_weight_kernel(const float* __restrict__ grad_output, const int* __restrict__ pos,
                                                int B, int CO_div_G, int CI_div_G, int HW, int G, float* __restrict__ grad_weight) {
    const int flat = blockIdx.y * BLOCK_SIZE + (conv ? threadIdx.x : threadIdx.y);
    const int batch = flat / HW;
    const int spatial = flat % HW;
    const int chan = blockIdx.x * BLOCK_SIZE + (conv ? threadIdx.y : threadIdx.x);
    if (batch >= B || chan >= CO_div_G) return;
    const int out_off = ((batch * G + blockIdx.z) * CO_div_G + chan) * HW + spatial;
    const int packed = pos[out_off];
    const float g = grad_output[out_off];
    const int src = packed & 0x7fffffff;  // clear the sign flag in bit 31
    atomicAdd(&grad_weight[(blockIdx.z * CO_div_G + chan) * CI_div_G + src],
              packed < 0 ? g : -g);
}
void inf_dist_forward_cuda(const float* input, const float* weight,
int B, int CO, int CI, int G, int HW, float* output, int* pos) {
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid2((CO / G + BLOCK_SIZE - 1) / BLOCK_SIZE, (B * HW + BLOCK_SIZE - 1) / BLOCK_SIZE, G);
if (HW == 1) inf_dist_forward_kernel<false><<<dimGrid2, dimBlock>>>(input, weight, B, CO / G, CI / G, HW, G, output, pos);
else inf_dist_forward_kernel<true><<<dimGrid2, dimBlock>>>(input, weight, B, CO / G, CI / G, HW, G, output, pos);
}
void inf_dist_bound_forward_cuda(const float* input_lower, const float* input_upper, const float* weight,
int B, int CO, int CI, int G, int HW, float* output_lower, float* output_upper) {
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid2((CO / G + BLOCK_SIZE - 1) / BLOCK_SIZE, (B * HW + BLOCK_SIZE - 1) / BLOCK_SIZE, G);
if (HW == 1) inf_dist_bound_forward_kernel<false><<<dimGrid2, dimBlock>>>(input_lower, input_upper, weight, B, CO / G, CI / G, HW, G, output_lower, output_upper);
else inf_dist_bound_forward_kernel<true><<<dimGrid2, dimBlock>>>(input_lower, input_upper, weight, B, CO / G, CI / G, HW, G, output_lower, output_upper);
}
void inf_dist_backward_input_cuda(const float* grad_output, const int* pos,
int B, int CO, int CI, int G, int HW, float* grad_input) {
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid2((CO / G + BLOCK_SIZE - 1) / BLOCK_SIZE, (B * HW + BLOCK_SIZE - 1) / BLOCK_SIZE, G);
if (HW == 1) inf_dist_backward_input_kernel<false><<<dimGrid2, dimBlock>>>(grad_output, pos, B, CO / G, CI / G, HW, G, grad_input);
else inf_dist_backward_input_kernel<true><<<dimGrid2, dimBlock>>>(grad_output, pos, B, CO / G, CI / G, HW, G, grad_input);
}
void inf_dist_backward_weight_cuda(const float* grad_output, const int* pos,
int B, int CO, int CI, int G, int HW, float* grad_weight) {
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid2((CO / G + BLOCK_SIZE - 1) / BLOCK_SIZE, (B * HW + BLOCK_SIZE - 1) / BLOCK_SIZE, G);
if (HW == 1) inf_dist_backward_weight_kernel<false><<<dimGrid2, dimBlock>>>(grad_output, pos, B, CO / G, CI / G, HW, G, grad_weight);
else inf_dist_backward_weight_kernel<true><<<dimGrid2, dimBlock>>>(grad_output, pos, B, CO / G, CI / G, HW, G, grad_weight);
}
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Evaluates a Griewank-like test function for each of *iter candidate points:
//   f(x) = (1/40) * sum_i x_i^2 + 1 - prod_i cos(x_i / i)
// One thread per candidate point. Thread `id` reads the overlapping window
// d_array[id] .. d_array[id + *n - 1] (this matches the original indexing)
// and writes its function value to ans[id].
__global__ void calculations_inv(float *d_array, float *ans, int *n, long *iter)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < *iter)
    {
        const float *p = d_array + id;  // start of this thread's window
        // Sum of squares over the window.
        float add = 0.0f;
        for (int i = 0; i < *n; i++)
        {
            add += p[i] * p[i];
        }
        // Product of cosines; the i-th factor is cos(x_i / (i + 1)).
        // cosf keeps the computation in single precision (the original
        // double-precision cos() promoted every operand).
        float mult = cosf(p[0]);
        for (int i = 1; i < *n; i++)
        {
            mult = mult * cosf(p[i] / (i + 1));
        }
        // BUG FIX: the original wrote `1 / 40 * add`, which is INTEGER
        // division (1 / 40 == 0) and silently dropped the quadratic term.
        ans[id] = add / 40.0f + 1.0f - mult;
    }
}
// Random search for the minimum of the test function over I^n candidate
// points in [-20, 20]^n, evaluated on the GPU.
// Usage: prog N I Blocks Threads
int main(int argc, char const *argv[])
{
    if(argc == 5)
    {
        clock_t start, end;
        double used_time;
        start = clock();
        long i; // iterator over candidate points
        int j;  // iterator over dimensions
        // Parameters from the command line.
        const int n = atoi(argv[1]);       // problem dimension
        const int I = atoi(argv[2]);       // iteration base -> I^n points evaluated
        const double iter = pow(I, n);
        // BUG FIX: the original did cudaMemcpy(d_iter, &iter, sizeof(long)),
        // copying the raw bit pattern of a double into the device long, so
        // the kernel read a garbage bound. Convert to an integral count first.
        const long iter_count = (long)iter;
        const int Blocks = atoi(argv[3]);  // number of GPU blocks
        const int Threads = atoi(argv[4]); // threads per GPU block
        const float x_max = 20.0;                // upper edge of the search domain
        const float rand_max = RAND_MAX / 40.0;  // scales rand() into [-20, 20]
        // Flat 1D array (iter_count windows of n floats) so a single
        // cudaMalloc/cudaMemcpy suffices.
        float *h_random_array = (float *) malloc(sizeof(float) * n * iter_count);
        // calloc so that slots the grid does not cover stay a defined 0.0f
        // instead of uninitialized memory when scanning for the minimum.
        float *ans = (float *) calloc(iter_count, sizeof(float));
        if (h_random_array == NULL || ans == NULL)
        {
            fprintf(stderr, "host allocation failed\n");
            free(h_random_array);
            free(ans);
            return 1;
        }
        // Draw the random candidate points.
        float *h_ptr_iterator = h_random_array;
        for (i = 0; i < iter_count; i++)
        {
            for (j = 0; j < n; j++)
            {
                *h_ptr_iterator = rand() / rand_max - x_max;
                h_ptr_iterator += 1;
            }
        }
        float *d_random_array; // random points in GPU memory
        float *d_ans;          // per-point results
        int *d_n;              // dimension
        long *d_iter;          // number of candidate points
        cudaMalloc((void **)&d_random_array, sizeof(float) * n * iter_count);
        cudaMalloc((void **)&d_ans, sizeof(float) * iter_count);
        cudaMalloc((void **)&d_n, sizeof(int));
        cudaMalloc((void **)&d_iter, sizeof(long));
        cudaMemcpy(d_random_array, h_random_array, sizeof(float) * n * iter_count, cudaMemcpyHostToDevice);
        cudaMemcpy(d_ans, ans, sizeof(float) * iter_count, cudaMemcpyHostToDevice);
        cudaMemcpy(d_n, &n, sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_iter, &iter_count, sizeof(long), cudaMemcpyHostToDevice);
        printf("Liczba blocków : n [%d] Liczba iteracji [%f] , bloki [%d] watki : [%d]", n , iter, Blocks , Threads);
        calculations_inv<<<Blocks, Threads>>>(d_random_array, d_ans, d_n, d_iter);
        // Surface bad launch configurations (kernel launches never return
        // an error directly).
        cudaError_t launch_err = cudaGetLastError();
        if (launch_err != cudaSuccess)
            fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(launch_err));
        // The blocking D2H copy also synchronizes with the kernel.
        cudaMemcpy(ans, d_ans, sizeof(float) * iter_count , cudaMemcpyDeviceToHost);
        // Scan the results for the minimum.
        float y_min = (iter_count > 0) ? ans[0] : 0.0f;
        for (i = 0; i < iter_count; i++)
        {
            if (ans[i] < y_min) y_min = ans[i];
        }
        end = clock();
        used_time = ((double) (end - start) / CLOCKS_PER_SEC);
        printf("szukane minimum : %f - czas : %f \n " , y_min, used_time);
        cudaFree(d_random_array);
        cudaFree(d_ans);
        cudaFree(d_n);
        cudaFree(d_iter);
        free(h_random_array);
        free(ans);
    }
    else
    {
        printf("Invalid program parameters plese type /main2 N I Blocks Threads where \n");
        printf("N - is problem dimension\n");
        printf("I - is number of iteratios\n");
        printf("Blocks - is number of used GPU blocks...max is %d\n", 0);
        printf("Threads- is number of used GPU threads per one block ... max is %d \n", 0);
        return 0;
    }
    return 0;
}
|
.file "tmpxft_00137bbc_00000000-6_main3.cudafe1.cpp"
.text
#APP
#NO_APP
# atexit handler installed by __sti____cudaRegisterAll: tears down the
# fat-binary registration made at program start.
 .type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
 .cfi_startproc
 endbr64
 subq $8, %rsp
 .cfi_def_cfa_offset 16
# pass the handle saved by __cudaRegisterFatBinary
 movq _ZL20__cudaFatCubinHandle(%rip), %rdi
 call __cudaUnregisterFatBinary@PLT
 addq $8, %rsp
 .cfi_def_cfa_offset 8
 ret
 .cfi_endproc
.LFE2060:
 .size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
# nvcc-generated host stub for calculations_inv(float*, float*, int*, long*).
# Builds the kernel-argument pointer array on the stack, pops the launch
# configuration pushed by the <<<...>>> call site, and forwards everything
# to cudaLaunchKernel.  Stack is protected by a canary (%fs:40).
 .globl _Z42__device_stub__Z16calculations_invPfS_PiPlPfS_PiPl
 .type _Z42__device_stub__Z16calculations_invPfS_PiPlPfS_PiPl, @function
_Z42__device_stub__Z16calculations_invPfS_PiPlPfS_PiPl:
.LFB2082:
 .cfi_startproc
 endbr64
 subq $152, %rsp
 .cfi_def_cfa_offset 160
# spill the four kernel arguments (d_array, ans, n, iter) to the stack
 movq %rdi, 24(%rsp)
 movq %rsi, 16(%rsp)
 movq %rdx, 8(%rsp)
 movq %rcx, (%rsp)
# install stack-protector canary
 movq %fs:40, %rax
 movq %rax, 136(%rsp)
 xorl %eax, %eax
# args[4]: array of pointers to the spilled arguments, as cudaLaunchKernel expects
 leaq 24(%rsp), %rax
 movq %rax, 96(%rsp)
 leaq 16(%rsp), %rax
 movq %rax, 104(%rsp)
 leaq 8(%rsp), %rax
 movq %rax, 112(%rsp)
 movq %rsp, %rax
 movq %rax, 120(%rsp)
# default-initialise gridDim/blockDim slots to (1,1,1) before popping the real config
 movl $1, 48(%rsp)
 movl $1, 52(%rsp)
 movl $1, 56(%rsp)
 movl $1, 60(%rsp)
 movl $1, 64(%rsp)
 movl $1, 68(%rsp)
 leaq 40(%rsp), %rcx
 leaq 32(%rsp), %rdx
 leaq 60(%rsp), %rsi
 leaq 48(%rsp), %rdi
 call __cudaPopCallConfiguration@PLT
 testl %eax, %eax
 je .L7
.L3:
# verify the canary, then return (launch errors are reported via the CUDA runtime)
 movq 136(%rsp), %rax
 subq %fs:40, %rax
 jne .L8
 addq $152, %rsp
 .cfi_remember_state
 .cfi_def_cfa_offset 8
 ret
.L7:
 .cfi_restore_state
# config popped successfully: push sharedMem/stream and launch the kernel
 pushq 40(%rsp)
 .cfi_def_cfa_offset 168
 pushq 40(%rsp)
 .cfi_def_cfa_offset 176
 leaq 112(%rsp), %r9
 movq 76(%rsp), %rcx
 movl 84(%rsp), %r8d
 movq 64(%rsp), %rsi
 movl 72(%rsp), %edx
 leaq _Z16calculations_invPfS_PiPl(%rip), %rdi
 call cudaLaunchKernel@PLT
 addq $16, %rsp
 .cfi_def_cfa_offset 160
 jmp .L3
.L8:
 call __stack_chk_fail@PLT
 .cfi_endproc
.LFE2082:
 .size _Z42__device_stub__Z16calculations_invPfS_PiPlPfS_PiPl, .-_Z42__device_stub__Z16calculations_invPfS_PiPlPfS_PiPl
# Host-side entry point bearing the kernel's mangled name; it simply tail-calls
# the device stub above.  This is what the registration code and call sites
# reference on the host.
 .globl _Z16calculations_invPfS_PiPl
 .type _Z16calculations_invPfS_PiPl, @function
_Z16calculations_invPfS_PiPl:
.LFB2083:
 .cfi_startproc
 endbr64
 subq $8, %rsp
 .cfi_def_cfa_offset 16
 call _Z42__device_stub__Z16calculations_invPfS_PiPlPfS_PiPl
 addq $8, %rsp
 .cfi_def_cfa_offset 8
 ret
 .cfi_endproc
.LFE2083:
 .size _Z16calculations_invPfS_PiPl, .-_Z16calculations_invPfS_PiPl
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "Liczba block\303\263w : n [%d] Liczba iteracji [%f] , bloki [%d] watki : [%d]"
.align 8
.LC7:
.string "szukane minimum : %f - czas : %f \n "
.align 8
.LC8:
.string "Invalid program parameters plese type /main2 N I Blocks Threads where \n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC9:
.string "N - is problem dimension\n"
.LC10:
.string "I - is number of iteratios\n"
.section .rodata.str1.8
.align 8
.LC11:
.string "Blocks - is number of used GPU blocks...max is %d\n"
.align 8
.LC12:
.string "Threads- is number of used GPU threads per one block ... max is %d \n"
.text
# Compiled host main():
#   parse argv -> n, I, Blocks, Threads; iter = pow(I, n)
#   fill host buffer with scaled rand() samples
#   cudaMalloc/cudaMemcpy, launch calculations_inv, copy results back
#   scan for the minimum, print timing, free everything.
# Stack layout (rsp-relative): 8 = Blocks / later y_min, 16 = start clock,
# 28 = Blocks copy, 36 = n, 40 = iter (double), 48/56/64/72 = device pointers,
# 80..96 = launch-configuration scratch, 104 = stack canary.
 .globl main
 .type main, @function
main:
.LFB2057:
 .cfi_startproc
 endbr64
 pushq %r15
 .cfi_def_cfa_offset 16
 .cfi_offset 15, -16
 pushq %r14
 .cfi_def_cfa_offset 24
 .cfi_offset 14, -24
 pushq %r13
 .cfi_def_cfa_offset 32
 .cfi_offset 13, -32
 pushq %r12
 .cfi_def_cfa_offset 40
 .cfi_offset 12, -40
 pushq %rbp
 .cfi_def_cfa_offset 48
 .cfi_offset 6, -48
 pushq %rbx
 .cfi_def_cfa_offset 56
 .cfi_offset 3, -56
 subq $120, %rsp
 .cfi_def_cfa_offset 176
 movq %fs:40, %rax
 movq %rax, 104(%rsp)
 xorl %eax, %eax
# require exactly 4 user arguments (argc == 5); otherwise print usage at .L12
 cmpl $5, %edi
 jne .L12
 movq %rsi, %rbx
 call clock@PLT
 movq %rax, 16(%rsp)
# n = strtol(argv[1]); stored at 36(%rsp)
 movq 8(%rbx), %rdi
 movl $10, %edx
 movl $0, %esi
 call __isoc23_strtol@PLT
 movl %eax, 36(%rsp)
# I = strtol(argv[2]); iter = pow((double)I? note: pow(I, n)) stored at 40(%rsp)
 movq 16(%rbx), %rdi
 movl $10, %edx
 movl $0, %esi
 call __isoc23_strtol@PLT
 pxor %xmm0, %xmm0
 cvtsi2sdl %eax, %xmm0
 pxor %xmm1, %xmm1
 cvtsi2sdl 36(%rsp), %xmm1
 call pow@PLT
 movsd %xmm0, 40(%rsp)
# Blocks = strtol(argv[3]); Threads = strtol(argv[4]) -> r15
 movq 24(%rbx), %rdi
 movl $10, %edx
 movl $0, %esi
 call __isoc23_strtol@PLT
 movq %rax, 8(%rsp)
 movl %eax, 28(%rsp)
 movq 32(%rbx), %rdi
 movl $10, %edx
 movl $0, %esi
 call __isoc23_strtol@PLT
 movq %rax, %r15
# h_random_array = malloc(sizeof(float)*n*iter): .L13/.L15 handle the
# unsigned/overflow fixups of the (size_t)(4*n) -> double -> size_t conversions
 movq 40(%rsp), %rbx
 movslq 36(%rsp), %rax
 salq $2, %rax
 js .L13
 pxor %xmm0, %xmm0
 cvtsi2sdq %rax, %xmm0
.L14:
 movq %rbx, %xmm4
 mulsd %xmm4, %xmm0
 comisd .LC0(%rip), %xmm0
 jnb .L15
 cvttsd2siq %xmm0, %rdi
.L16:
 call malloc@PLT
 movq %rax, %r14
# ans = malloc(sizeof(float)*iter)  (.LC1 holds the double 4.0)
 movq %rbx, %xmm0
 mulsd .LC1(%rip), %xmm0
 comisd .LC0(%rip), %xmm0
 jnb .L17
 cvttsd2siq %xmm0, %rdi
.L18:
 call malloc@PLT
 movq %rax, %r13
# random-fill loop: rbp walks the buffer, r12 = outer counter i, ebx = inner j
 movq %r14, %rbp
 movl $0, %r12d
 pxor %xmm0, %xmm0
 movq %rbx, %xmm5
 comisd %xmm0, %xmm5
 ja .L19
.L20:
# cudaMalloc for d_random_array (size recomputed as above)
 movslq 36(%rsp), %rax
 salq $2, %rax
 js .L24
 pxor %xmm0, %xmm0
 cvtsi2sdq %rax, %xmm0
.L25:
 movq %rbx, %xmm6
 mulsd %xmm6, %xmm0
 comisd .LC0(%rip), %xmm0
 jnb .L26
 cvttsd2siq %xmm0, %rsi
.L27:
 leaq 48(%rsp), %rdi
 call cudaMalloc@PLT
# cudaMalloc for d_ans, d_n (4 bytes), d_iter (8 bytes)
 movsd .LC1(%rip), %xmm0
 mulsd 40(%rsp), %xmm0
 comisd .LC0(%rip), %xmm0
 jnb .L28
 cvttsd2siq %xmm0, %rsi
.L29:
 leaq 56(%rsp), %rdi
 call cudaMalloc@PLT
 leaq 64(%rsp), %rdi
 movl $4, %esi
 call cudaMalloc@PLT
 leaq 72(%rsp), %rdi
 movl $8, %esi
 call cudaMalloc@PLT
# H2D copy of the sample array (cudaMemcpyHostToDevice == 1 in %ecx)
 movslq 36(%rsp), %rax
 salq $2, %rax
 js .L30
 pxor %xmm0, %xmm0
 cvtsi2sdq %rax, %xmm0
.L31:
 mulsd 40(%rsp), %xmm0
 comisd .LC0(%rip), %xmm0
 jnb .L32
 cvttsd2siq %xmm0, %rdx
.L33:
 movl $1, %ecx
 movq %r14, %rsi
 movq 48(%rsp), %rdi
 call cudaMemcpy@PLT
# H2D copy of the (uninitialised) ans buffer
 movsd .LC1(%rip), %xmm0
 mulsd 40(%rsp), %xmm0
 comisd .LC0(%rip), %xmm0
 jnb .L34
 cvttsd2siq %xmm0, %rdx
.L35:
 movl $1, %ecx
 movq %r13, %rsi
 movq 56(%rsp), %rdi
 call cudaMemcpy@PLT
# copy n into d_n
 leaq 36(%rsp), %rsi
 movl $1, %ecx
 movl $4, %edx
 movq 64(%rsp), %rdi
 call cudaMemcpy@PLT
# NOTE(review): copies the 8 raw bytes of the DOUBLE 'iter' at 40(%rsp) into
# d_iter, which the kernel dereferences as a long — faithful to the source bug.
 leaq 40(%rsp), %rsi
 movl $1, %ecx
 movl $8, %edx
 movq 72(%rsp), %rdi
 call cudaMemcpy@PLT
# printf banner (.LC5), one vector register used (%eax = 1)
 movl %r15d, %r8d
 movl 28(%rsp), %ecx
 movsd 40(%rsp), %xmm0
 movl 36(%rsp), %edx
 leaq .LC5(%rip), %rsi
 movl $2, %edi
 movl $1, %eax
 call __printf_chk@PLT
# build dim3 launch config: grid = (Blocks,1,1), block = (Threads,1,1)
 movl %r15d, 92(%rsp)
 movl $1, 96(%rsp)
 movl 8(%rsp), %eax
 movl %eax, 80(%rsp)
 movl $1, 84(%rsp)
 movl $0, %r9d
 movl $0, %r8d
 movq 92(%rsp), %rdx
 movl $1, %ecx
 movq 80(%rsp), %rdi
 movl $1, %esi
 call __cudaPushCallConfiguration@PLT
 testl %eax, %eax
 je .L56
.L36:
# D2H copy of the results (cudaMemcpyDeviceToHost == 2)
 movsd .LC1(%rip), %xmm0
 mulsd 40(%rsp), %xmm0
 comisd .LC0(%rip), %xmm0
 jnb .L37
 cvttsd2siq %xmm0, %rdx
.L38:
 movl $2, %ecx
 movq 56(%rsp), %rsi
 movq %r13, %rdi
 call cudaMemcpy@PLT
# minimum scan over ans[]; y_min kept at 8(%rsp)
 movss 0(%r13), %xmm7
 movss %xmm7, 8(%rsp)
 movsd 40(%rsp), %xmm1
 movl $1, %eax
 pxor %xmm0, %xmm0
 comisd %xmm0, %xmm1
 jbe .L39
.L42:
 movss -4(%r13,%rax,4), %xmm0
 minss 8(%rsp), %xmm0
 movss %xmm0, 8(%rsp)
 pxor %xmm0, %xmm0
 cvtsi2sdl %eax, %xmm0
 addq $1, %rax
 comisd %xmm0, %xmm1
 ja .L42
.L39:
# end = clock(); used_time = (end-start)/CLOCKS_PER_SEC (.LC6 = 1e6); print .LC7
 call clock@PLT
 movq 16(%rsp), %rcx
 subq %rcx, %rax
 pxor %xmm1, %xmm1
 cvtsi2sdq %rax, %xmm1
 pxor %xmm0, %xmm0
 cvtss2sd 8(%rsp), %xmm0
 divsd .LC6(%rip), %xmm1
 leaq .LC7(%rip), %rsi
 movl $2, %edi
 movl $2, %eax
 call __printf_chk@PLT
# cleanup: four cudaFree, two free
 movq 48(%rsp), %rdi
 call cudaFree@PLT
 movq 56(%rsp), %rdi
 call cudaFree@PLT
 movq 64(%rsp), %rdi
 call cudaFree@PLT
 movq 72(%rsp), %rdi
 call cudaFree@PLT
 movq %r14, %rdi
 call free@PLT
 movq %r13, %rdi
 call free@PLT
.L43:
# canary check and return 0
 movq 104(%rsp), %rax
 subq %fs:40, %rax
 jne .L57
 movl $0, %eax
 addq $120, %rsp
 .cfi_remember_state
 .cfi_def_cfa_offset 56
 popq %rbx
 .cfi_def_cfa_offset 48
 popq %rbp
 .cfi_def_cfa_offset 40
 popq %r12
 .cfi_def_cfa_offset 32
 popq %r13
 .cfi_def_cfa_offset 24
 popq %r14
 .cfi_def_cfa_offset 16
 popq %r15
 .cfi_def_cfa_offset 8
 ret
# out-of-line fixups for signed->double conversions of huge sizes
.L13:
 .cfi_restore_state
 shrq %rax
 pxor %xmm0, %xmm0
 cvtsi2sdq %rax, %xmm0
 addsd %xmm0, %xmm0
 jmp .L14
.L15:
 subsd .LC0(%rip), %xmm0
 cvttsd2siq %xmm0, %rdi
 btcq $63, %rdi
 jmp .L16
.L17:
 subsd .LC0(%rip), %xmm0
 cvttsd2siq %xmm0, %rdi
 btcq $63, %rdi
 jmp .L18
# inner random-fill loop body: *p++ = rand()/rand_max - x_max
# (.LC3 = RAND_MAX/40.0f, .LC4 = 20.0f)
.L21:
 call rand@PLT
 pxor %xmm0, %xmm0
 cvtsi2ssl %eax, %xmm0
 divss .LC3(%rip), %xmm0
 subss .LC4(%rip), %xmm0
 movss %xmm0, 0(%rbp)
 addq $4, %rbp
 addl $1, %ebx
 cmpl %ebx, 36(%rsp)
 jg .L21
.L23:
 addl $1, %r12d
 movq 40(%rsp), %rbx
 pxor %xmm0, %xmm0
 cvtsi2sdl %r12d, %xmm0
 movq %rbx, %xmm2
 comisd %xmm0, %xmm2
 jbe .L20
.L19:
 movl $0, %ebx
 cmpl $0, 36(%rsp)
 jg .L21
 jmp .L23
.L24:
 shrq %rax
 pxor %xmm0, %xmm0
 cvtsi2sdq %rax, %xmm0
 addsd %xmm0, %xmm0
 jmp .L25
.L26:
 subsd .LC0(%rip), %xmm0
 cvttsd2siq %xmm0, %rsi
 btcq $63, %rsi
 jmp .L27
.L28:
 subsd .LC0(%rip), %xmm0
 cvttsd2siq %xmm0, %rsi
 btcq $63, %rsi
 jmp .L29
.L30:
 shrq %rax
 pxor %xmm0, %xmm0
 cvtsi2sdq %rax, %xmm0
 addsd %xmm0, %xmm0
 jmp .L31
.L32:
 subsd .LC0(%rip), %xmm0
 cvttsd2siq %xmm0, %rdx
 btcq $63, %rdx
 jmp .L33
.L34:
 subsd .LC0(%rip), %xmm0
 cvttsd2siq %xmm0, %rdx
 btcq $63, %rdx
 jmp .L35
# kernel launch path: configuration accepted, invoke the host trampoline
.L56:
 movq 72(%rsp), %rcx
 movq 64(%rsp), %rdx
 movq 56(%rsp), %rsi
 movq 48(%rsp), %rdi
 call _Z42__device_stub__Z16calculations_invPfS_PiPlPfS_PiPl
 jmp .L36
.L37:
 subsd .LC0(%rip), %xmm0
 cvttsd2siq %xmm0, %rdx
 btcq $63, %rdx
 jmp .L38
# usage message branch (argc != 5): print .LC8 .. .LC12 and fall into .L43
.L12:
 leaq .LC8(%rip), %rsi
 movl $2, %edi
 movl $0, %eax
 call __printf_chk@PLT
 leaq .LC9(%rip), %rsi
 movl $2, %edi
 movl $0, %eax
 call __printf_chk@PLT
 leaq .LC10(%rip), %rsi
 movl $2, %edi
 movl $0, %eax
 call __printf_chk@PLT
 movl $0, %edx
 leaq .LC11(%rip), %rsi
 movl $2, %edi
 movl $0, %eax
 call __printf_chk@PLT
 movl $0, %edx
 leaq .LC12(%rip), %rsi
 movl $2, %edi
 movl $0, %eax
 call __printf_chk@PLT
 jmp .L43
.L57:
 call __stack_chk_fail@PLT
 .cfi_endproc
.LFE2057:
 .size main, .-main
.section .rodata.str1.1
.LC13:
.string "_Z16calculations_invPfS_PiPl"
.text
# Static initialiser (placed in .init_array): registers the embedded fat binary
# and the calculations_inv kernel with the CUDA runtime, then installs the
# unregister routine via atexit.
 .type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
 .cfi_startproc
 endbr64
 subq $8, %rsp
 .cfi_def_cfa_offset 16
 leaq _ZL15__fatDeviceText(%rip), %rdi
 call __cudaRegisterFatBinary@PLT
# keep the returned handle for registration and later teardown
 movq %rax, %rdi
 movq %rax, _ZL20__cudaFatCubinHandle(%rip)
# four trailing NULL arguments of __cudaRegisterFunction go on the stack
 pushq $0
 .cfi_def_cfa_offset 24
 pushq $0
 .cfi_def_cfa_offset 32
 pushq $0
 .cfi_def_cfa_offset 40
 pushq $0
 .cfi_def_cfa_offset 48
 movl $0, %r9d
 movl $-1, %r8d
 leaq .LC13(%rip), %rdx
 movq %rdx, %rcx
 leaq _Z16calculations_invPfS_PiPl(%rip), %rsi
 call __cudaRegisterFunction@PLT
 addq $32, %rsp
 .cfi_def_cfa_offset 16
 movq _ZL20__cudaFatCubinHandle(%rip), %rdi
 call __cudaRegisterFatBinaryEnd@PLT
 leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
 call atexit@PLT
 addq $8, %rsp
 .cfi_def_cfa_offset 8
 ret
 .cfi_endproc
.LFE2085:
 .size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1138753536
.align 8
.LC1:
.long 0
.long 1074790400
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC3:
.long 1280101581
.align 4
.LC4:
.long 1101004800
.section .rodata.cst8
.align 8
.LC6:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <time.h>
// Kernel: evaluates a Griewank-like test function for one candidate point per thread.
//   d_array - flattened input samples (read-only)
//   ans     - per-thread result, written to ans[id]
//   iter    - number of candidate points; threads with id >= *iter do nothing
//   n       - problem dimension (number of components summed / multiplied)
__global__ void calculations_inv(float *d_array, float *ans,int *n, long *iter)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < *iter)
    {
        // Sum-of-squares term over n consecutive elements starting at d_array[id].
        // NOTE(review): windows of adjacent ids overlap; if each point is meant to
        // occupy its own row of length n, the base offset should be id * (*n) -
        // TODO confirm intended layout against the host fill loop.
        float *p = d_array + id;
        float add = 0.0f;
        for (int i = 0; i < *n; i++)
        {
            add += *p * *p;
            p++;
        }
        // Product-of-cosines term, restarting at d_array[id].
        p = d_array + id;
        float mult = cosf(*p);
        p++;
        for (int i = 1; i < *n; i++)
        {
            mult = mult * cosf(*p / (float)(i + 1));
            p++;
        }
        // BUGFIX: the original wrote "1 / 40 * add", which is INTEGER division
        // (1/40 == 0) and silently discarded the whole sum-of-squares term
        // (visible in the generated code as a multiply by 0).  Use float division.
        ans[id] = add / 40.0f + 1.0f - mult;
    }
}
// Host driver: parses N, I, Blocks, Threads from the command line, generates
// iter = I^n random candidate points in [-20, 20], evaluates them on the GPU
// with calculations_inv, and reports the minimum plus the elapsed wall time.
int main(int argc, char const *argv[])
{
    if (argc == 5)
    {
        clock_t start, end;
        double used_time;
        start = clock();
        long i;                                  // iterator (long: iter may exceed INT_MAX)
        int j;                                   // iterator
        // Parameters taken from the command line.
        const int n = atoi(argv[1]);             // problem dimension
        const int I = atoi(argv[2]);             // iteration base -> work size is I^n
        const double iter = pow(I, n);           // number of candidate points
        const int Blocks = atoi(argv[3]);        // number of GPU blocks
        const int Threads = atoi(argv[4]);       // threads per GPU block
        const float x_max = 20.0;                // upper bound of the sampling domain
        const float rand_max = RAND_MAX / 40.0;  // scale factor so samples land in [-20, 20]
        // One flat array is used because of how GPU memory is allocated.
        float *h_random_array = (float *) malloc(sizeof(float) * n * iter);
        float *h_ptr_iterator = h_random_array;
        float *ans = (float *) malloc(sizeof(float) * iter);
        // ROBUSTNESS: fail loudly instead of dereferencing NULL on huge I^n.
        if (h_random_array == NULL || ans == NULL)
        {
            fprintf(stderr, "host allocation failed\n");
            return 1;
        }
        // Draw random values and store them in the array.
        for (i = 0; i < iter; i++)
        {
            for (j = 0; j < n; j++)
            {
                *h_ptr_iterator = rand() / rand_max - x_max;
                h_ptr_iterator += 1;
            }
        }
        float *d_random_array;  // device copy of the random samples
        float *d_ans;           // device result array
        int *d_n;               // problem dimension on the device
        long *d_iter;           // number of candidate points on the device
        cudaMalloc((void **)&d_random_array, sizeof(float) * n * iter);
        cudaMalloc((void **)&d_ans, sizeof(float) * iter);
        cudaMalloc((void **)&d_n, sizeof(int));
        cudaMalloc((void **)&d_iter, sizeof(long));
        cudaMemcpy(d_random_array, h_random_array, sizeof(float) * n * iter, cudaMemcpyHostToDevice);
        cudaMemcpy(d_ans, ans, sizeof(float) * iter, cudaMemcpyHostToDevice);
        cudaMemcpy(d_n, &n, sizeof(int), cudaMemcpyHostToDevice);
        // BUGFIX: the kernel dereferences d_iter as a long, but the original code
        // copied the raw BYTES of the double 'iter' into it (reinterpreting a
        // double bit pattern as a long).  Convert the value explicitly first.
        const long iter_count = (long) iter;
        cudaMemcpy(d_iter, &iter_count, sizeof(long), cudaMemcpyHostToDevice);
        printf("Liczba blocków : n [%d] Liczba iteracji [%f] , bloki [%d] watki : [%d]", n , iter, Blocks , Threads);
        calculations_inv<<<Blocks, Threads>>>(d_random_array, d_ans, d_n, d_iter);
        // Blocking copy back also synchronises with the kernel.
        cudaMemcpy(ans, d_ans, sizeof(float) * iter , cudaMemcpyDeviceToHost);
        // Search for the minimum over all candidate evaluations.
        float y_min = ans[0];
        for (i = 0; i < iter; i++)
        {
            if (ans[i] < y_min) y_min = ans[i];
        }
        end = clock();
        used_time = ((double) (end - start) / CLOCKS_PER_SEC);
        printf("szukane minimum : %f - czas : %f \n " , y_min, used_time);
        cudaFree(d_random_array);
        cudaFree(d_ans);
        cudaFree(d_n);
        cudaFree(d_iter);
        free(h_random_array);
        free(ans);
    }
    else
    {
        // BUGFIX (messages): corrected "plese" -> "please", "iteratios" -> "iterations".
        printf("Invalid program parameters please type /main2 N I Blocks Threads where \n");
        printf("N - is problem dimension\n");
        printf("I - is number of iterations\n");
        printf("Blocks - is number of used GPU blocks...max is %d\n", 0);
        printf("Threads- is number of used GPU threads per one block ... max is %d \n", 0);
        return 0;
    }
    return 0;
}
|
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <time.h>
// Kernel: evaluates a Griewank-like test function for one candidate point per thread.
//   d_array - flattened input samples (read-only)
//   ans     - per-thread result, written to ans[id]
//   iter    - number of candidate points; threads with id >= *iter do nothing
//   n       - problem dimension (number of components summed / multiplied)
__global__ void calculations_inv(float *d_array, float *ans,int *n, long *iter)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < *iter)
    {
        // Sum-of-squares term over n consecutive elements starting at d_array[id].
        // NOTE(review): windows of adjacent ids overlap; if each point is meant to
        // occupy its own row of length n, the base offset should be id * (*n) -
        // TODO confirm intended layout against the host fill loop.
        float *p = d_array + id;
        float add = 0.0f;
        for (int i = 0; i < *n; i++)
        {
            add += *p * *p;
            p++;
        }
        // Product-of-cosines term, restarting at d_array[id].
        p = d_array + id;
        float mult = cosf(*p);
        p++;
        for (int i = 1; i < *n; i++)
        {
            mult = mult * cosf(*p / (float)(i + 1));
            p++;
        }
        // BUGFIX: the original wrote "1 / 40 * add", which is INTEGER division
        // (1/40 == 0) and silently discarded the whole sum-of-squares term
        // (visible in the generated gfx1100 code as "v_fma_f32 v0, v0, 0, 1.0").
        // Use float division so the term contributes.
        ans[id] = add / 40.0f + 1.0f - mult;
    }
}
// Host driver (HIP port): parses N, I, Blocks, Threads, generates iter = I^n
// random candidate points in [-20, 20], evaluates them on the GPU with
// calculations_inv, and reports the minimum plus the elapsed wall time.
int main(int argc, char const *argv[])
{
    if (argc == 5)
    {
        clock_t start, end;
        double used_time;
        start = clock();
        long i;                                  // iterator (long: iter may exceed INT_MAX)
        int j;                                   // iterator
        // Parameters taken from the command line.
        const int n = atoi(argv[1]);             // problem dimension
        const int I = atoi(argv[2]);             // iteration base -> work size is I^n
        const double iter = pow(I, n);           // number of candidate points
        const int Blocks = atoi(argv[3]);        // number of GPU blocks
        const int Threads = atoi(argv[4]);       // threads per GPU block
        const float x_max = 20.0;                // upper bound of the sampling domain
        const float rand_max = RAND_MAX / 40.0;  // scale factor so samples land in [-20, 20]
        // One flat array is used because of how GPU memory is allocated.
        float *h_random_array = (float *) malloc(sizeof(float) * n * iter);
        float *h_ptr_iterator = h_random_array;
        float *ans = (float *) malloc(sizeof(float) * iter);
        // ROBUSTNESS: fail loudly instead of dereferencing NULL on huge I^n.
        if (h_random_array == NULL || ans == NULL)
        {
            fprintf(stderr, "host allocation failed\n");
            return 1;
        }
        // Draw random values and store them in the array.
        for (i = 0; i < iter; i++)
        {
            for (j = 0; j < n; j++)
            {
                *h_ptr_iterator = rand() / rand_max - x_max;
                h_ptr_iterator += 1;
            }
        }
        float *d_random_array;  // device copy of the random samples
        float *d_ans;           // device result array
        int *d_n;               // problem dimension on the device
        long *d_iter;           // number of candidate points on the device
        hipMalloc((void **)&d_random_array, sizeof(float) * n * iter);
        hipMalloc((void **)&d_ans, sizeof(float) * iter);
        hipMalloc((void **)&d_n, sizeof(int));
        hipMalloc((void **)&d_iter, sizeof(long));
        hipMemcpy(d_random_array, h_random_array, sizeof(float) * n * iter, hipMemcpyHostToDevice);
        hipMemcpy(d_ans, ans, sizeof(float) * iter, hipMemcpyHostToDevice);
        hipMemcpy(d_n, &n, sizeof(int), hipMemcpyHostToDevice);
        // BUGFIX: the kernel dereferences d_iter as a long, but the original code
        // copied the raw BYTES of the double 'iter' into it (reinterpreting a
        // double bit pattern as a long).  Convert the value explicitly first.
        const long iter_count = (long) iter;
        hipMemcpy(d_iter, &iter_count, sizeof(long), hipMemcpyHostToDevice);
        printf("Liczba blocków : n [%d] Liczba iteracji [%f] , bloki [%d] watki : [%d]", n , iter, Blocks , Threads);
        calculations_inv<<<Blocks, Threads>>>(d_random_array, d_ans, d_n, d_iter);
        // Blocking copy back also synchronises with the kernel.
        hipMemcpy(ans, d_ans, sizeof(float) * iter , hipMemcpyDeviceToHost);
        // Search for the minimum over all candidate evaluations.
        float y_min = ans[0];
        for (i = 0; i < iter; i++)
        {
            if (ans[i] < y_min) y_min = ans[i];
        }
        end = clock();
        used_time = ((double) (end - start) / CLOCKS_PER_SEC);
        printf("szukane minimum : %f - czas : %f \n " , y_min, used_time);
        hipFree(d_random_array);
        hipFree(d_ans);
        hipFree(d_n);
        hipFree(d_iter);
        free(h_random_array);
        free(ans);
    }
    else
    {
        // BUGFIX (messages): corrected "plese" -> "please", "iteratios" -> "iterations".
        printf("Invalid program parameters please type /main2 N I Blocks Threads where \n");
        printf("N - is problem dimension\n");
        printf("I - is number of iterations\n");
        printf("Blocks - is number of used GPU blocks...max is %d\n", 0);
        printf("Threads- is number of used GPU threads per one block ... max is %d \n", 0);
        return 0;
    }
    return 0;
}
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <time.h>
// Kernel: evaluates a Griewank-like test function for one candidate point per thread.
//   d_array - flattened input samples (read-only)
//   ans     - per-thread result, written to ans[id]
//   iter    - number of candidate points; threads with id >= *iter do nothing
//   n       - problem dimension (number of components summed / multiplied)
__global__ void calculations_inv(float *d_array, float *ans,int *n, long *iter)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < *iter)
    {
        // Sum-of-squares term over n consecutive elements starting at d_array[id].
        // NOTE(review): windows of adjacent ids overlap; if each point is meant to
        // occupy its own row of length n, the base offset should be id * (*n) -
        // TODO confirm intended layout against the host fill loop.
        float *p = d_array + id;
        float add = 0.0f;
        for (int i = 0; i < *n; i++)
        {
            add += *p * *p;
            p++;
        }
        // Product-of-cosines term, restarting at d_array[id].
        p = d_array + id;
        float mult = cosf(*p);
        p++;
        for (int i = 1; i < *n; i++)
        {
            mult = mult * cosf(*p / (float)(i + 1));
            p++;
        }
        // BUGFIX: the original wrote "1 / 40 * add", which is INTEGER division
        // (1/40 == 0) and silently discarded the whole sum-of-squares term
        // (visible in the generated gfx1100 code as "v_fma_f32 v0, v0, 0, 1.0").
        // Use float division so the term contributes.
        ans[id] = add / 40.0f + 1.0f - mult;
    }
}
// Host driver (HIP port): parses N, I, Blocks, Threads, generates iter = I^n
// random candidate points in [-20, 20], evaluates them on the GPU with
// calculations_inv, and reports the minimum plus the elapsed wall time.
int main(int argc, char const *argv[])
{
    if (argc == 5)
    {
        clock_t start, end;
        double used_time;
        start = clock();
        long i;                                  // iterator (long: iter may exceed INT_MAX)
        int j;                                   // iterator
        // Parameters taken from the command line.
        const int n = atoi(argv[1]);             // problem dimension
        const int I = atoi(argv[2]);             // iteration base -> work size is I^n
        const double iter = pow(I, n);           // number of candidate points
        const int Blocks = atoi(argv[3]);        // number of GPU blocks
        const int Threads = atoi(argv[4]);       // threads per GPU block
        const float x_max = 20.0;                // upper bound of the sampling domain
        const float rand_max = RAND_MAX / 40.0;  // scale factor so samples land in [-20, 20]
        // One flat array is used because of how GPU memory is allocated.
        float *h_random_array = (float *) malloc(sizeof(float) * n * iter);
        float *h_ptr_iterator = h_random_array;
        float *ans = (float *) malloc(sizeof(float) * iter);
        // ROBUSTNESS: fail loudly instead of dereferencing NULL on huge I^n.
        if (h_random_array == NULL || ans == NULL)
        {
            fprintf(stderr, "host allocation failed\n");
            return 1;
        }
        // Draw random values and store them in the array.
        for (i = 0; i < iter; i++)
        {
            for (j = 0; j < n; j++)
            {
                *h_ptr_iterator = rand() / rand_max - x_max;
                h_ptr_iterator += 1;
            }
        }
        float *d_random_array;  // device copy of the random samples
        float *d_ans;           // device result array
        int *d_n;               // problem dimension on the device
        long *d_iter;           // number of candidate points on the device
        hipMalloc((void **)&d_random_array, sizeof(float) * n * iter);
        hipMalloc((void **)&d_ans, sizeof(float) * iter);
        hipMalloc((void **)&d_n, sizeof(int));
        hipMalloc((void **)&d_iter, sizeof(long));
        hipMemcpy(d_random_array, h_random_array, sizeof(float) * n * iter, hipMemcpyHostToDevice);
        hipMemcpy(d_ans, ans, sizeof(float) * iter, hipMemcpyHostToDevice);
        hipMemcpy(d_n, &n, sizeof(int), hipMemcpyHostToDevice);
        // BUGFIX: the kernel dereferences d_iter as a long, but the original code
        // copied the raw BYTES of the double 'iter' into it (reinterpreting a
        // double bit pattern as a long).  Convert the value explicitly first.
        const long iter_count = (long) iter;
        hipMemcpy(d_iter, &iter_count, sizeof(long), hipMemcpyHostToDevice);
        printf("Liczba blocków : n [%d] Liczba iteracji [%f] , bloki [%d] watki : [%d]", n , iter, Blocks , Threads);
        calculations_inv<<<Blocks, Threads>>>(d_random_array, d_ans, d_n, d_iter);
        // Blocking copy back also synchronises with the kernel.
        hipMemcpy(ans, d_ans, sizeof(float) * iter , hipMemcpyDeviceToHost);
        // Search for the minimum over all candidate evaluations.
        float y_min = ans[0];
        for (i = 0; i < iter; i++)
        {
            if (ans[i] < y_min) y_min = ans[i];
        }
        end = clock();
        used_time = ((double) (end - start) / CLOCKS_PER_SEC);
        printf("szukane minimum : %f - czas : %f \n " , y_min, used_time);
        hipFree(d_random_array);
        hipFree(d_ans);
        hipFree(d_n);
        hipFree(d_iter);
        free(h_random_array);
        free(ans);
    }
    else
    {
        // BUGFIX (messages): corrected "plese" -> "please", "iteratios" -> "iterations".
        printf("Invalid program parameters please type /main2 N I Blocks Threads where \n");
        printf("N - is problem dimension\n");
        printf("I - is number of iterations\n");
        printf("Blocks - is number of used GPU blocks...max is %d\n", 0);
        printf("Threads- is number of used GPU threads per one block ... max is %d \n", 0);
        return 0;
    }
    return 0;
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16calculations_invPfS_PiPl
.globl _Z16calculations_invPfS_PiPl
.p2align 8
.type _Z16calculations_invPfS_PiPl,@function
_Z16calculations_invPfS_PiPl:
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x18
s_load_b32 s4, s[0:1], 0x2c
s_waitcnt lgkmcnt(0)
s_load_b64 s[2:3], s[2:3], 0x0
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i64_e32 vcc_lo, s[2:3], v[1:2]
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_18
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x10
s_load_b64 s[4:5], s[0:1], 0x0
v_lshlrev_b64 v[3:4], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_load_b32 s6, s[2:3], 0x0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v3, vcc_lo, s4, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s6, 1
s_cbranch_scc1 .LBB0_5
s_delay_alu instid0(VALU_DEP_1)
v_mov_b32_e32 v6, v4
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v5, v3
s_mov_b32 s2, s6
.LBB0_3:
global_load_b32 v7, v[5:6], off
v_add_co_u32 v5, vcc_lo, v5, 4
v_add_co_ci_u32_e32 v6, vcc_lo, 0, v6, vcc_lo
s_add_i32 s2, s2, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lg_u32 s2, 0
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v0, v7, v7
s_cbranch_scc1 .LBB0_3
s_delay_alu instid0(VALU_DEP_1)
v_fma_f32 v0, v0, 0, 1.0
s_branch .LBB0_6
.LBB0_5:
v_mov_b32_e32 v0, 1.0
.LBB0_6:
global_load_b32 v3, v[3:4], off
s_mov_b32 s3, exec_lo
s_waitcnt vmcnt(0)
v_cmpx_ngt_f32_e64 0x48000000, |v3|
s_xor_b32 s7, exec_lo, s3
s_cbranch_execz .LBB0_8
v_dual_mov_b32 v6, 0 :: v_dual_and_b32 v11, 0x7fffffff, v3
s_mov_b32 s2, 0x7fffff
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_and_or_b32 v14, v11, s2, 0x800000
v_lshrrev_b32_e32 v11, 23, v11
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[4:5], null, v14, 0xfe5163ab, 0
v_add_nc_u32_e32 v12, 0xffffff88, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cmp_lt_u32_e32 vcc_lo, 63, v12
v_mad_u64_u32 v[7:8], null, v14, 0x3c439041, v[5:6]
v_cndmask_b32_e64 v13, 0, 0xffffffc0, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mov_b32_e32 v5, v8
v_add_nc_u32_e32 v13, v13, v12
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[8:9], null, v14, 0xdb629599, v[5:6]
v_cmp_lt_u32_e64 s2, 31, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cndmask_b32_e64 v15, 0, 0xffffffe0, s2
v_dual_mov_b32 v5, v9 :: v_dual_cndmask_b32 v4, v8, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v15, v15, v13
v_mad_u64_u32 v[9:10], null, v14, 0xf534ddc0, v[5:6]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_lt_u32_e64 s3, 31, v15
v_mov_b32_e32 v5, v10
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cndmask_b32_e32 v7, v9, v7, vcc_lo
v_mad_u64_u32 v[10:11], null, v14, 0xfc2757d1, v[5:6]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v4, v7, v4, s2
v_mov_b32_e32 v5, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[11:12], null, v14, 0x4e441529, v[5:6]
v_mov_b32_e32 v5, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[12:13], null, v14, 0xa2f9836e, v[5:6]
v_cndmask_b32_e64 v5, 0, 0xffffffe0, s3
v_dual_cndmask_b32 v6, v11, v9 :: v_dual_add_nc_u32 v5, v5, v15
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_dual_cndmask_b32 v12, v12, v10 :: v_dual_cndmask_b32 v11, v13, v11
v_cndmask_b32_e32 v10, v10, v8, vcc_lo
v_cmp_eq_u32_e32 vcc_lo, 0, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_cndmask_b32_e64 v9, v12, v6, s2
v_cndmask_b32_e64 v11, v11, v12, s2
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
v_cndmask_b32_e64 v6, v6, v10, s2
v_sub_nc_u32_e32 v12, 32, v5
v_cndmask_b32_e64 v10, v10, v7, s2
v_cndmask_b32_e64 v11, v11, v9, s3
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cndmask_b32_e64 v9, v9, v6, s3
v_cndmask_b32_e64 v6, v6, v10, s3
v_cndmask_b32_e64 v4, v10, v4, s3
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_alignbit_b32 v13, v11, v9, v12
v_alignbit_b32 v8, v9, v6, v12
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_cndmask_b32_e32 v5, v13, v11, vcc_lo
v_alignbit_b32 v11, v6, v4, v12
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v7, v8, v9, vcc_lo
v_bfe_u32 v8, v5, 29, 1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v6, v11, v6, vcc_lo
v_alignbit_b32 v9, v5, v7, 30
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_sub_nc_u32_e32 v10, 0, v8
v_alignbit_b32 v7, v7, v6, 30
v_alignbit_b32 v4, v6, v4, 30
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_xor_b32_e32 v9, v9, v10
v_xor_b32_e32 v6, v7, v10
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_xor_b32_e32 v4, v4, v10
v_clz_i32_u32_e32 v11, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_min_u32_e32 v11, 32, v11
v_sub_nc_u32_e32 v7, 31, v11
v_lshlrev_b32_e32 v13, 23, v11
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_alignbit_b32 v9, v9, v6, v7
v_alignbit_b32 v4, v6, v4, v7
v_lshrrev_b32_e32 v7, 29, v5
v_alignbit_b32 v6, v9, v4, 9
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_lshlrev_b32_e32 v7, 31, v7
v_lshrrev_b32_e32 v9, 9, v9
v_clz_i32_u32_e32 v10, v6
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_or_b32_e32 v12, 0.5, v7
v_min_u32_e32 v10, 32, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v12, v12, v13
v_sub_nc_u32_e32 v14, 31, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_alignbit_b32 v4, v6, v4, v14
v_or_b32_e32 v6, v9, v12
v_add_lshl_u32 v9, v10, v11, 23
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_lshrrev_b32_e32 v4, 9, v4
v_mul_f32_e32 v10, 0x3fc90fda, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v4, v4, v9
v_fma_f32 v9, v6, 0x3fc90fda, -v10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v4, 0x33000000, v4
v_fmamk_f32 v6, v6, 0x33a22168, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_or_b32_e32 v4, v4, v7
v_fmac_f32_e32 v6, 0x3fc90fda, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_f32_e32 v4, v10, v6
v_lshrrev_b32_e32 v5, 30, v5
v_add_nc_u32_e32 v5, v8, v5
.LBB0_8:
s_and_not1_saveexec_b32 s2, s7
v_mul_f32_e64 v4, 0x3f22f983, |v3|
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_rndne_f32_e32 v5, v4
v_fma_f32 v4, v5, 0xbfc90fda, |v3|
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmamk_f32 v4, v5, 0xb3a22168, v4
v_fmamk_f32 v4, v5, 0xa7c234c4, v4
v_cvt_i32_f32_e32 v5, v5
s_or_b32 exec_lo, exec_lo, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_dual_mul_f32 v6, v4, v4 :: v_dual_and_b32 v9, 1, v5
s_mov_b32 s7, 0xb94c1982
s_mov_b32 s8, 0x37d75334
s_cmp_lt_i32 s6, 2
v_fmaak_f32 v7, s7, v6, 0x3c0881c4
v_cmp_eq_u32_e32 vcc_lo, 0, v9
s_mov_b32 s9, 1
v_lshlrev_b32_e32 v5, 30, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_fmaak_f32 v7, v6, v7, 0xbe2aaa9d
v_fmaak_f32 v8, s8, v6, 0xbab64f3b
v_and_b32_e32 v5, 0x80000000, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_mul_f32_e32 v7, v6, v7
v_fmaak_f32 v8, v6, v8, 0x3d2aabf7
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fmac_f32_e32 v4, v4, v7
v_fmaak_f32 v8, v6, v8, 0xbf000004
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v6, v6, v8, 1.0
v_cndmask_b32_e64 v4, -v4, v6, vcc_lo
v_cmp_class_f32_e64 vcc_lo, v3, 0x1f8
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_xor_b32_e32 v4, v5, v4
v_cndmask_b32_e32 v5, 0x7fc00000, v4, vcc_lo
s_cbranch_scc1 .LBB0_17
v_lshlrev_b64 v[3:4], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v3, vcc_lo, v3, s4
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
s_mov_b32 s4, 0x7fffff
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v3, vcc_lo, v3, 4
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_branch .LBB0_13
.LBB0_12:
s_or_b32 exec_lo, exec_lo, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_dual_mul_f32 v9, v7, v7 :: v_dual_and_b32 v12, 1, v8
s_cmp_lg_u32 s6, s9
v_lshlrev_b32_e32 v8, 30, v8
v_fmaak_f32 v10, s7, v9, 0x3c0881c4
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cmp_eq_u32_e32 vcc_lo, 0, v12
v_and_b32_e32 v8, 0x80000000, v8
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmaak_f32 v10, v9, v10, 0xbe2aaa9d
v_dual_fmaak_f32 v11, s8, v9, 0xbab64f3b :: v_dual_mul_f32 v10, v9, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fmaak_f32 v11, v9, v11, 0x3d2aabf7
v_fmac_f32_e32 v7, v7, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmaak_f32 v11, v9, v11, 0xbf000004
v_fma_f32 v9, v9, v11, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v7, -v7, v9, vcc_lo
v_cmp_class_f32_e64 vcc_lo, v6, 0x1f8
v_xor_b32_e32 v7, v8, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v6, 0x7fc00000, v7, vcc_lo
v_add_co_u32 v3, vcc_lo, v3, 4
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
v_mul_f32_e32 v5, v5, v6
s_cbranch_scc0 .LBB0_17
.LBB0_13:
global_load_b32 v6, v[3:4], off
s_add_i32 s9, s9, 1
s_mov_b32 s3, exec_lo
v_cvt_f32_i32_e32 v7, s9
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_scale_f32 v8, null, v7, v7, v6
v_rcp_f32_e32 v9, v8
s_waitcnt_depctr 0xfff
v_fma_f32 v10, -v8, v9, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v9, v10, v9
v_div_scale_f32 v10, vcc_lo, v6, v7, v6
v_mul_f32_e32 v11, v10, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v12, -v8, v11, v10
v_fmac_f32_e32 v11, v12, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v8, -v8, v11, v10
v_div_fmas_f32 v8, v8, v9, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fixup_f32 v6, v8, v7, v6
v_cmpx_ngt_f32_e64 0x48000000, |v6|
s_xor_b32 s5, exec_lo, s3
s_cbranch_execz .LBB0_15
v_dual_mov_b32 v9, 0 :: v_dual_and_b32 v14, 0x7fffffff, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_and_or_b32 v17, v14, s4, 0x800000
v_lshrrev_b32_e32 v14, 23, v14
v_mad_u64_u32 v[7:8], null, v17, 0xfe5163ab, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v15, 0xffffff88, v14
v_cmp_lt_u32_e32 vcc_lo, 63, v15
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[10:11], null, v17, 0x3c439041, v[8:9]
v_cndmask_b32_e64 v16, 0, 0xffffffc0, vcc_lo
v_mov_b32_e32 v8, v11
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v16, v16, v15
v_mad_u64_u32 v[11:12], null, v17, 0xdb629599, v[8:9]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_lt_u32_e64 s2, 31, v16
v_cndmask_b32_e64 v18, 0, 0xffffffe0, s2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_dual_mov_b32 v8, v12 :: v_dual_cndmask_b32 v7, v11, v7
v_add_nc_u32_e32 v18, v18, v16
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[12:13], null, v17, 0xf534ddc0, v[8:9]
v_cmp_lt_u32_e64 s3, 31, v18
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_mov_b32_e32 v8, v13
v_cndmask_b32_e32 v10, v12, v10, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[13:14], null, v17, 0xfc2757d1, v[8:9]
v_cndmask_b32_e64 v7, v10, v7, s2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mov_b32_e32 v8, v14
v_mad_u64_u32 v[14:15], null, v17, 0x4e441529, v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mov_b32_e32 v8, v15
v_mad_u64_u32 v[15:16], null, v17, 0xa2f9836e, v[8:9]
v_cndmask_b32_e64 v8, 0, 0xffffffe0, s3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_dual_cndmask_b32 v9, v14, v12 :: v_dual_add_nc_u32 v8, v8, v18
v_dual_cndmask_b32 v15, v15, v13 :: v_dual_cndmask_b32 v14, v16, v14
v_cndmask_b32_e32 v13, v13, v11, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cmp_eq_u32_e32 vcc_lo, 0, v8
v_cndmask_b32_e64 v12, v15, v9, s2
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_cndmask_b32_e64 v14, v14, v15, s2
v_cndmask_b32_e64 v9, v9, v13, s2
v_sub_nc_u32_e32 v15, 32, v8
v_cndmask_b32_e64 v13, v13, v10, s2
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_cndmask_b32_e64 v14, v14, v12, s3
v_cndmask_b32_e64 v12, v12, v9, s3
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cndmask_b32_e64 v9, v9, v13, s3
v_cndmask_b32_e64 v7, v13, v7, s3
v_alignbit_b32 v16, v14, v12, v15
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_alignbit_b32 v11, v12, v9, v15
v_cndmask_b32_e32 v8, v16, v14, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_alignbit_b32 v14, v9, v7, v15
v_cndmask_b32_e32 v10, v11, v12, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_bfe_u32 v11, v8, 29, 1
v_cndmask_b32_e32 v9, v14, v9, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_alignbit_b32 v12, v8, v10, 30
v_sub_nc_u32_e32 v13, 0, v11
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_alignbit_b32 v10, v10, v9, 30
v_alignbit_b32 v7, v9, v7, 30
v_xor_b32_e32 v12, v12, v13
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_xor_b32_e32 v9, v10, v13
v_xor_b32_e32 v7, v7, v13
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_clz_i32_u32_e32 v14, v12
v_min_u32_e32 v14, 32, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v10, 31, v14
v_lshlrev_b32_e32 v16, 23, v14
v_alignbit_b32 v12, v12, v9, v10
v_alignbit_b32 v7, v9, v7, v10
v_lshrrev_b32_e32 v10, 29, v8
v_lshrrev_b32_e32 v8, 30, v8
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_alignbit_b32 v9, v12, v7, 9
v_lshlrev_b32_e32 v10, 31, v10
v_lshrrev_b32_e32 v12, 9, v12
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_nc_u32_e32 v8, v11, v8
v_clz_i32_u32_e32 v13, v9
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
v_or_b32_e32 v15, 0.5, v10
v_min_u32_e32 v13, 32, v13
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v15, v15, v16
v_sub_nc_u32_e32 v17, 31, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_alignbit_b32 v7, v9, v7, v17
v_or_b32_e32 v9, v12, v15
v_add_lshl_u32 v12, v13, v14, 23
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_lshrrev_b32_e32 v7, 9, v7
v_mul_f32_e32 v13, 0x3fc90fda, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v7, v7, v12
v_fma_f32 v12, v9, 0x3fc90fda, -v13
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v7, 0x33000000, v7
v_fmac_f32_e32 v12, 0x33a22168, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_or_b32_e32 v7, v7, v10
v_fmac_f32_e32 v12, 0x3fc90fda, v7
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v7, v13, v12
.LBB0_15:
s_and_not1_saveexec_b32 s2, s5
s_cbranch_execz .LBB0_12
v_mul_f32_e64 v7, 0x3f22f983, |v6|
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_rndne_f32_e32 v8, v7
v_fma_f32 v7, v8, 0xbfc90fda, |v6|
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v7, 0xb3a22168, v8
v_fmac_f32_e32 v7, 0xa7c234c4, v8
v_cvt_i32_f32_e32 v8, v8
s_branch .LBB0_12
.LBB0_17:
s_load_b64 s[0:1], s[0:1], 0x8
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_sub_f32_e32 v3, v0, v5
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v1
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[0:1], v3, off
.LBB0_18:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16calculations_invPfS_PiPl
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 19
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16calculations_invPfS_PiPl, .Lfunc_end0-_Z16calculations_invPfS_PiPl
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16calculations_invPfS_PiPl
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16calculations_invPfS_PiPl.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 19
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <hip/hip_runtime.h>
/**
 * Device kernel: one thread per row id in [0, *iter).
 *
 * For its row the thread computes
 *   add  = sum of squares of n consecutive floats starting at d_array[id]
 *   mult = product of cos(x_0) * cos(x_1 / 2) * ... * cos(x_{n-1} / n)
 * and stores  ans[id] = add / 40 + 1 - mult.
 *
 * Launch: 1-D grid, 1-D blocks; threads with id >= *iter do nothing, so any
 * launch configuration covering *iter threads is valid.
 *
 * NOTE(review): the host fills d_array as *iter rows of n contiguous floats,
 * but this kernel starts each "row" at d_array + id (stride-1 overlapping
 * windows), not d_array + id * n — confirm which indexing is intended.
 * The original indexing is preserved here.
 */
__global__ void calculations_inv(float *d_array, float *ans, int *n, long *iter)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;

    if (id < *iter)
    {
        const int dim = *n;
        const float *row = d_array + id; // window start for this thread (see NOTE above)
        int i;

        // Sum of squares over the window.
        float add = 0.0f;
        for (i = 0; i < dim; i++)
        {
            add += row[i] * row[i];
        }

        // Product of cosines; element i is scaled by 1/(i+1).
        float mult = cosf(row[0]);
        for (i = 1; i < dim; i++)
        {
            mult = mult * cosf(row[i] / (float)(i + 1));
        }

        // FIX: the original wrote `1 / 40 * add`, which is integer division
        // (1/40 == 0) and silently discarded the sum-of-squares term.
        ans[id] = add / 40.0f + 1.0f - mult;
    }
}
int main(int argc, char const *argv[])
{
if(argc == 5)
{
clock_t start, end;
double used_time;
start = clock();
int i; // iterator
int j; // iterator
//zmienne z lini argumentów wywołania programu
const int n = atoi(argv[1]); // wymiar zadania
const int I = atoi(argv[2]); // liczba iteracji - > obliczenia przeprowadzane sa na wartosci I^n
const double iter = pow(I, n);
const int Blocks = atoi(argv[3]); // liczba bloków GPU
const int Threads = atoi(argv[4]); // liczba watków dla jednego bloku GPU
//const float x_min = -20.0; // minimalna warotsc dziedziny zadania
const float x_max = 20.0; // maksymalna wartosc dziedziny zadania
const float rand_max = RAND_MAX / 40.0; //ograniczenie przedzialu losowania zmiennych
float *h_random_array = (float* ) malloc(sizeof(float) * n * iter); //do operacji na danych stosowana jest tablica jednowymiarowa ze wzgledu na alokacje pamieci w GPU
float *h_ptr_iterator = h_random_array;
float *ans = (float* ) malloc(sizeof(float) * iter);
//losowanie wartosci i umieszczenie ich w tablicy
for(i = 0 ; i < iter; i++)
{
for(j = 0 ; j < n ; j++)
{
*h_ptr_iterator = rand() / rand_max - x_max;
h_ptr_iterator += 1;
}
}
float *d_random_array; //tablica zmiennych wylosowanych w pamieci GPU
float *d_ans; //tablica wynikow
int *d_n; // wymiar
long *d_iter; //ilosc iteratcji
hipMalloc((void **)&d_random_array, sizeof(float) * n * iter);
hipMalloc((void **)&d_ans, sizeof(float) * iter);
hipMalloc((void **)&d_n, sizeof(int));
hipMalloc((void **)&d_iter, sizeof(long));
hipMemcpy(d_random_array, h_random_array, sizeof(float) * n * iter, hipMemcpyHostToDevice);
hipMemcpy(d_ans, ans, sizeof(float) * iter, hipMemcpyHostToDevice);
hipMemcpy(d_n, &n, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_iter, &iter, sizeof(long) , hipMemcpyHostToDevice);
printf("Liczba blocków : n [%d] Liczba iteracji [%f] , bloki [%d] watki : [%d]", n , iter, Blocks , Threads);
calculations_inv<<<Blocks, Threads>>>(d_random_array, d_ans, d_n, d_iter);
hipMemcpy(ans, d_ans, sizeof(float) * iter , hipMemcpyDeviceToHost);
//szukanie minimum
float y_min = ans[0];
for(i = 0 ; i < iter; i++)
{
if(ans[i] < y_min) y_min = ans[i];
}
end = clock();
used_time = ((double) (end - start) / CLOCKS_PER_SEC);
printf("szukane minimum : %f - czas : %f \n " , y_min, used_time);
hipFree(d_random_array);
hipFree(d_ans);
hipFree(d_n);
hipFree(d_iter);
free(h_random_array);
free(ans);
}
else
{
printf("Invalid program parameters plese type /main2 N I Blocks Threads where \n");
printf("N - is problem dimension\n");
printf("I - is number of iteratios\n");
printf("Blocks - is number of used GPU blocks...max is %d\n", 0);
printf("Threads- is number of used GPU threads per one block ... max is %d \n", 0);
return 0;
}
return 0;
}
|
.text
.file "main3.hip"
.globl _Z31__device_stub__calculations_invPfS_PiPl # -- Begin function _Z31__device_stub__calculations_invPfS_PiPl
.p2align 4, 0x90
.type _Z31__device_stub__calculations_invPfS_PiPl,@function
_Z31__device_stub__calculations_invPfS_PiPl: # @_Z31__device_stub__calculations_invPfS_PiPl
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movq %rcx, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16calculations_invPfS_PiPl, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z31__device_stub__calculations_invPfS_PiPl, .Lfunc_end0-_Z31__device_stub__calculations_invPfS_PiPl
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function main
.LCPI1_0:
.long 1127219200 # 0x43300000
.long 1160773632 # 0x45300000
.long 0 # 0x0
.long 0 # 0x0
.LCPI1_1:
.quad 0x4330000000000000 # double 4503599627370496
.quad 0x4530000000000000 # double 1.9342813113834067E+25
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI1_2:
.quad 0x43e0000000000000 # double 9.2233720368547758E+18
.LCPI1_3:
.quad 0x4010000000000000 # double 4
.LCPI1_6:
.quad 0x412e848000000000 # double 1.0E+6
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0
.LCPI1_4:
.long 0x4c4ccccd # float 53687092
.LCPI1_5:
.long 0xc1a00000 # float -20
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
cmpl $5, %edi
jne .LBB1_14
# %bb.1:
movq %rsi, %r14
callq clock
movq %rax, 72(%rsp) # 8-byte Spill
movq 8(%r14), %rdi
xorl %ebp, %ebp
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r15
movl %r15d, 12(%rsp)
movq 16(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
cvtsi2sd %eax, %xmm0
cvtsi2sd %r15d, %xmm1
callq pow
movsd %xmm0, 32(%rsp) # 8-byte Spill
movsd %xmm0, 16(%rsp)
movq 24(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, 40(%rsp) # 8-byte Spill
movq 32(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r13
movslq 12(%rsp), %rax
shlq $2, %rax
movq %rax, %xmm0
punpckldq .LCPI1_0(%rip), %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
subpd .LCPI1_1(%rip), %xmm0
movapd %xmm0, %xmm1
unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1]
addsd %xmm0, %xmm1
mulsd 32(%rsp), %xmm1 # 8-byte Folded Reload
cvttsd2si %xmm1, %rax
movq %rax, %rcx
sarq $63, %rcx
subsd .LCPI1_2(%rip), %xmm1
cvttsd2si %xmm1, %rdi
andq %rcx, %rdi
orq %rax, %rdi
callq malloc
movsd 16(%rsp), %xmm1 # xmm1 = mem[0],zero
movsd %xmm1, 32(%rsp) # 8-byte Spill
movsd .LCPI1_3(%rip), %xmm0 # xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
cvttsd2si %xmm0, %rcx
movq %rax, %r12
subsd .LCPI1_2(%rip), %xmm0
cvttsd2si %xmm0, %rax
movq %rcx, %rdi
sarq $63, %rdi
andq %rax, %rdi
orq %rcx, %rdi
callq malloc
movsd 32(%rsp), %xmm2 # 8-byte Reload
# xmm2 = mem[0],zero
movq %rax, %r15
xorpd %xmm0, %xmm0
ucomisd %xmm0, %xmm2
jbe .LBB1_7
# %bb.2: # %.preheader.preheader
movq %r12, %r14
jmp .LBB1_3
.p2align 4, 0x90
.LBB1_6: # %._crit_edge
# in Loop: Header=BB1_3 Depth=1
incl %ebp
xorps %xmm0, %xmm0
cvtsi2sd %ebp, %xmm0
movsd 16(%rsp), %xmm2 # xmm2 = mem[0],zero
ucomisd %xmm0, %xmm2
jbe .LBB1_7
.LBB1_3: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_5 Depth 2
cmpl $0, 12(%rsp)
jle .LBB1_6
# %bb.4: # %.lr.ph.preheader
# in Loop: Header=BB1_3 Depth=1
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_5: # %.lr.ph
# Parent Loop BB1_3 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI1_4(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss %xmm1, %xmm0
movss .LCPI1_5(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
addss %xmm1, %xmm0
movss %xmm0, (%r14)
addq $4, %r14
incl %ebx
cmpl 12(%rsp), %ebx
jl .LBB1_5
jmp .LBB1_6
.LBB1_14:
movl $.Lstr, %edi
callq puts@PLT
movl $.Lstr.1, %edi
callq puts@PLT
movl $.Lstr.2, %edi
callq puts@PLT
movl $.L.str.5, %edi
xorl %esi, %esi
xorl %eax, %eax
callq printf
movl $.L.str.6, %edi
xorl %esi, %esi
xorl %eax, %eax
callq printf
jmp .LBB1_15
.LBB1_7: # %._crit_edge52
movslq 12(%rsp), %rax
shlq $2, %rax
movq %rax, %xmm0
punpckldq .LCPI1_0(%rip), %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
subpd .LCPI1_1(%rip), %xmm0
movapd %xmm0, %xmm1
unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1]
addsd %xmm0, %xmm1
mulsd %xmm2, %xmm1
cvttsd2si %xmm1, %rax
movq %rax, %rcx
sarq $63, %rcx
movsd .LCPI1_2(%rip), %xmm0 # xmm0 = mem[0],zero
subsd %xmm0, %xmm1
cvttsd2si %xmm1, %rsi
andq %rcx, %rsi
orq %rax, %rsi
leaq 64(%rsp), %rdi
callq hipMalloc
movsd .LCPI1_3(%rip), %xmm1 # xmm1 = mem[0],zero
movsd 16(%rsp), %xmm0 # xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
cvttsd2si %xmm0, %rax
subsd .LCPI1_2(%rip), %xmm0
cvttsd2si %xmm0, %rcx
movq %rax, %rsi
sarq $63, %rsi
andq %rcx, %rsi
orq %rax, %rsi
leaq 24(%rsp), %rdi
callq hipMalloc
leaq 56(%rsp), %rdi
movl $4, %esi
callq hipMalloc
leaq 48(%rsp), %rdi
movl $8, %esi
callq hipMalloc
movslq 12(%rsp), %rax
shlq $2, %rax
movq %rax, %xmm0
punpckldq .LCPI1_0(%rip), %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
subpd .LCPI1_1(%rip), %xmm0
movapd %xmm0, %xmm1
unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1]
addsd %xmm0, %xmm1
mulsd 16(%rsp), %xmm1
cvttsd2si %xmm1, %rax
movq 64(%rsp), %rdi
movq %rax, %rcx
sarq $63, %rcx
subsd .LCPI1_2(%rip), %xmm1
cvttsd2si %xmm1, %rdx
andq %rcx, %rdx
orq %rax, %rdx
movq %r12, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movsd .LCPI1_3(%rip), %xmm1 # xmm1 = mem[0],zero
mulsd 16(%rsp), %xmm1
movapd %xmm1, %xmm0
subsd .LCPI1_2(%rip), %xmm0
cvttsd2si %xmm0, %rax
cvttsd2si %xmm1, %rcx
movq %rcx, %rdx
sarq $63, %rdx
andq %rax, %rdx
orq %rcx, %rdx
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq 56(%rsp), %rdi
leaq 12(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movq 48(%rsp), %rdi
leaq 16(%rsp), %rsi
movl $8, %edx
movl $1, %ecx
callq hipMemcpy
movl 12(%rsp), %esi
movsd 16(%rsp), %xmm0 # xmm0 = mem[0],zero
movl $.L.str, %edi
movq 40(%rsp), %r14 # 8-byte Reload
movl %r14d, %edx
movl %r13d, %ecx
movb $1, %al
callq printf
movl %r14d, %edi
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
movl %r13d, %edx
orq %rax, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_9
# %bb.8:
movq 64(%rsp), %rax
movq 24(%rsp), %rcx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq %rax, 152(%rsp)
movq %rcx, 144(%rsp)
movq %rdx, 136(%rsp)
movq %rsi, 128(%rsp)
leaq 152(%rsp), %rax
movq %rax, 160(%rsp)
leaq 144(%rsp), %rax
movq %rax, 168(%rsp)
leaq 136(%rsp), %rax
movq %rax, 176(%rsp)
leaq 128(%rsp), %rax
movq %rax, 184(%rsp)
leaq 112(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 88(%rsp), %rdx
leaq 80(%rsp), %rcx
callq __hipPopCallConfiguration
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
movq 96(%rsp), %rcx
movl 104(%rsp), %r8d
leaq 160(%rsp), %r9
movl $_Z16calculations_invPfS_PiPl, %edi
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_9:
movq 24(%rsp), %rsi
movsd 16(%rsp), %xmm0 # xmm0 = mem[0],zero
mulsd .LCPI1_3(%rip), %xmm0
cvttsd2si %xmm0, %rax
movq %rax, %rcx
sarq $63, %rcx
subsd .LCPI1_2(%rip), %xmm0
cvttsd2si %xmm0, %rdx
andq %rcx, %rdx
orq %rax, %rdx
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movss (%r15), %xmm1 # xmm1 = mem[0],zero,zero,zero
movsd 16(%rsp), %xmm0 # xmm0 = mem[0],zero
xorpd %xmm2, %xmm2
ucomisd %xmm2, %xmm0
jbe .LBB1_10
# %bb.11: # %.lr.ph57.preheader
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_12: # %.lr.ph57
# =>This Inner Loop Header: Depth=1
movss (%r15,%rax,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
incq %rax
xorps %xmm2, %xmm2
cvtsi2sd %eax, %xmm2
minss %xmm1, %xmm3
ucomisd %xmm2, %xmm0
movaps %xmm3, %xmm1
ja .LBB1_12
jmp .LBB1_13
.LBB1_10:
movaps %xmm1, %xmm3
.LBB1_13: # %._crit_edge58
movss %xmm3, 40(%rsp) # 4-byte Spill
callq clock
subq 72(%rsp), %rax # 8-byte Folded Reload
xorps %xmm1, %xmm1
cvtsi2sd %rax, %xmm1
divsd .LCPI1_6(%rip), %xmm1
movss 40(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $2, %al
callq printf
movq 64(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 56(%rsp), %rdi
callq hipFree
movq 48(%rsp), %rdi
callq hipFree
movq %r12, %rdi
callq free
movq %r15, %rdi
callq free
.LBB1_15:
xorl %eax, %eax
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16calculations_invPfS_PiPl, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16calculations_invPfS_PiPl,@object # @_Z16calculations_invPfS_PiPl
.section .rodata,"a",@progbits
.globl _Z16calculations_invPfS_PiPl
.p2align 3, 0x0
_Z16calculations_invPfS_PiPl:
.quad _Z31__device_stub__calculations_invPfS_PiPl
.size _Z16calculations_invPfS_PiPl, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Liczba block\303\263w : n [%d] Liczba iteracji [%f] , bloki [%d] watki : [%d]"
.size .L.str, 72
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "szukane minimum : %f - czas : %f \n "
.size .L.str.1, 37
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Blocks - is number of used GPU blocks...max is %d\n"
.size .L.str.5, 51
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Threads- is number of used GPU threads per one block ... max is %d \n"
.size .L.str.6, 70
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z16calculations_invPfS_PiPl"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Invalid program parameters plese type /main2 N I Blocks Threads where "
.size .Lstr, 71
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "N - is problem dimension"
.size .Lstr.1, 25
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "I - is number of iteratios"
.size .Lstr.2, 27
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__calculations_invPfS_PiPl
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16calculations_invPfS_PiPl
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_00137bbc_00000000-6_main3.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z42__device_stub__Z16calculations_invPfS_PiPlPfS_PiPl
.type _Z42__device_stub__Z16calculations_invPfS_PiPlPfS_PiPl, @function
_Z42__device_stub__Z16calculations_invPfS_PiPlPfS_PiPl:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16calculations_invPfS_PiPl(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z42__device_stub__Z16calculations_invPfS_PiPlPfS_PiPl, .-_Z42__device_stub__Z16calculations_invPfS_PiPlPfS_PiPl
.globl _Z16calculations_invPfS_PiPl
.type _Z16calculations_invPfS_PiPl, @function
_Z16calculations_invPfS_PiPl:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z42__device_stub__Z16calculations_invPfS_PiPlPfS_PiPl
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z16calculations_invPfS_PiPl, .-_Z16calculations_invPfS_PiPl
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "Liczba block\303\263w : n [%d] Liczba iteracji [%f] , bloki [%d] watki : [%d]"
.align 8
.LC7:
.string "szukane minimum : %f - czas : %f \n "
.align 8
.LC8:
.string "Invalid program parameters plese type /main2 N I Blocks Threads where \n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC9:
.string "N - is problem dimension\n"
.LC10:
.string "I - is number of iteratios\n"
.section .rodata.str1.8
.align 8
.LC11:
.string "Blocks - is number of used GPU blocks...max is %d\n"
.align 8
.LC12:
.string "Threads- is number of used GPU threads per one block ... max is %d \n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
cmpl $5, %edi
jne .L12
movq %rsi, %rbx
call clock@PLT
movq %rax, 16(%rsp)
movq 8(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, 36(%rsp)
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
pxor %xmm1, %xmm1
cvtsi2sdl 36(%rsp), %xmm1
call pow@PLT
movsd %xmm0, 40(%rsp)
movq 24(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, 8(%rsp)
movl %eax, 28(%rsp)
movq 32(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %r15
movq 40(%rsp), %rbx
movslq 36(%rsp), %rax
salq $2, %rax
js .L13
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
.L14:
movq %rbx, %xmm4
mulsd %xmm4, %xmm0
comisd .LC0(%rip), %xmm0
jnb .L15
cvttsd2siq %xmm0, %rdi
.L16:
call malloc@PLT
movq %rax, %r14
movq %rbx, %xmm0
mulsd .LC1(%rip), %xmm0
comisd .LC0(%rip), %xmm0
jnb .L17
cvttsd2siq %xmm0, %rdi
.L18:
call malloc@PLT
movq %rax, %r13
movq %r14, %rbp
movl $0, %r12d
pxor %xmm0, %xmm0
movq %rbx, %xmm5
comisd %xmm0, %xmm5
ja .L19
.L20:
movslq 36(%rsp), %rax
salq $2, %rax
js .L24
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
.L25:
movq %rbx, %xmm6
mulsd %xmm6, %xmm0
comisd .LC0(%rip), %xmm0
jnb .L26
cvttsd2siq %xmm0, %rsi
.L27:
leaq 48(%rsp), %rdi
call cudaMalloc@PLT
movsd .LC1(%rip), %xmm0
mulsd 40(%rsp), %xmm0
comisd .LC0(%rip), %xmm0
jnb .L28
cvttsd2siq %xmm0, %rsi
.L29:
leaq 56(%rsp), %rdi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movl $8, %esi
call cudaMalloc@PLT
movslq 36(%rsp), %rax
salq $2, %rax
js .L30
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
.L31:
mulsd 40(%rsp), %xmm0
comisd .LC0(%rip), %xmm0
jnb .L32
cvttsd2siq %xmm0, %rdx
.L33:
movl $1, %ecx
movq %r14, %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movsd .LC1(%rip), %xmm0
mulsd 40(%rsp), %xmm0
comisd .LC0(%rip), %xmm0
jnb .L34
cvttsd2siq %xmm0, %rdx
.L35:
movl $1, %ecx
movq %r13, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
leaq 36(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
leaq 40(%rsp), %rsi
movl $1, %ecx
movl $8, %edx
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl %r15d, %r8d
movl 28(%rsp), %ecx
movsd 40(%rsp), %xmm0
movl 36(%rsp), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl %r15d, 92(%rsp)
movl $1, 96(%rsp)
movl 8(%rsp), %eax
movl %eax, 80(%rsp)
movl $1, 84(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L56
.L36:
movsd .LC1(%rip), %xmm0
mulsd 40(%rsp), %xmm0
comisd .LC0(%rip), %xmm0
jnb .L37
cvttsd2siq %xmm0, %rdx
.L38:
movl $2, %ecx
movq 56(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movss 0(%r13), %xmm7
movss %xmm7, 8(%rsp)
movsd 40(%rsp), %xmm1
movl $1, %eax
pxor %xmm0, %xmm0
comisd %xmm0, %xmm1
jbe .L39
.L42:
movss -4(%r13,%rax,4), %xmm0
minss 8(%rsp), %xmm0
movss %xmm0, 8(%rsp)
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
addq $1, %rax
comisd %xmm0, %xmm1
ja .L42
.L39:
call clock@PLT
movq 16(%rsp), %rcx
subq %rcx, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
pxor %xmm0, %xmm0
cvtss2sd 8(%rsp), %xmm0
divsd .LC6(%rip), %xmm1
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq %r14, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
.L43:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L57
movl $0, %eax
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
shrq %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
addsd %xmm0, %xmm0
jmp .L14
.L15:
subsd .LC0(%rip), %xmm0
cvttsd2siq %xmm0, %rdi
btcq $63, %rdi
jmp .L16
.L17:
subsd .LC0(%rip), %xmm0
cvttsd2siq %xmm0, %rdi
btcq $63, %rdi
jmp .L18
.L21:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
divss .LC3(%rip), %xmm0
subss .LC4(%rip), %xmm0
movss %xmm0, 0(%rbp)
addq $4, %rbp
addl $1, %ebx
cmpl %ebx, 36(%rsp)
jg .L21
.L23:
addl $1, %r12d
movq 40(%rsp), %rbx
pxor %xmm0, %xmm0
cvtsi2sdl %r12d, %xmm0
movq %rbx, %xmm2
comisd %xmm0, %xmm2
jbe .L20
.L19:
movl $0, %ebx
cmpl $0, 36(%rsp)
jg .L21
jmp .L23
.L24:
shrq %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
addsd %xmm0, %xmm0
jmp .L25
.L26:
subsd .LC0(%rip), %xmm0
cvttsd2siq %xmm0, %rsi
btcq $63, %rsi
jmp .L27
.L28:
subsd .LC0(%rip), %xmm0
cvttsd2siq %xmm0, %rsi
btcq $63, %rsi
jmp .L29
.L30:
shrq %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
addsd %xmm0, %xmm0
jmp .L31
.L32:
subsd .LC0(%rip), %xmm0
cvttsd2siq %xmm0, %rdx
btcq $63, %rdx
jmp .L33
.L34:
subsd .LC0(%rip), %xmm0
cvttsd2siq %xmm0, %rdx
btcq $63, %rdx
jmp .L35
.L56:
movq 72(%rsp), %rcx
movq 64(%rsp), %rdx
movq 56(%rsp), %rsi
movq 48(%rsp), %rdi
call _Z42__device_stub__Z16calculations_invPfS_PiPlPfS_PiPl
jmp .L36
.L37:
subsd .LC0(%rip), %xmm0
cvttsd2siq %xmm0, %rdx
btcq $63, %rdx
jmp .L38
.L12:
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %edx
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L43
.L57:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC13:
.string "_Z16calculations_invPfS_PiPl"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _Z16calculations_invPfS_PiPl(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long 0
.long 1138753536
.align 8
.LC1:
.long 0
.long 1074790400
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC3:
.long 1280101581
.align 4
.LC4:
.long 1101004800
.section .rodata.cst8
.align 8
.LC6:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "main3.hip"
.globl _Z31__device_stub__calculations_invPfS_PiPl # -- Begin function _Z31__device_stub__calculations_invPfS_PiPl
.p2align 4, 0x90
.type _Z31__device_stub__calculations_invPfS_PiPl,@function
_Z31__device_stub__calculations_invPfS_PiPl: # @_Z31__device_stub__calculations_invPfS_PiPl
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movq %rcx, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rax
movq %rax, 104(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16calculations_invPfS_PiPl, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z31__device_stub__calculations_invPfS_PiPl, .Lfunc_end0-_Z31__device_stub__calculations_invPfS_PiPl
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function main
.LCPI1_0:
.long 1127219200 # 0x43300000
.long 1160773632 # 0x45300000
.long 0 # 0x0
.long 0 # 0x0
.LCPI1_1:
.quad 0x4330000000000000 # double 4503599627370496
.quad 0x4530000000000000 # double 1.9342813113834067E+25
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI1_2:
.quad 0x43e0000000000000 # double 9.2233720368547758E+18
.LCPI1_3:
.quad 0x4010000000000000 # double 4
.LCPI1_6:
.quad 0x412e848000000000 # double 1.0E+6
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0
.LCPI1_4:
.long 0x4c4ccccd # float 53687092
.LCPI1_5:
.long 0xc1a00000 # float -20
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
cmpl $5, %edi
jne .LBB1_14
# %bb.1:
movq %rsi, %r14
callq clock
movq %rax, 72(%rsp) # 8-byte Spill
movq 8(%r14), %rdi
xorl %ebp, %ebp
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r15
movl %r15d, 12(%rsp)
movq 16(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
cvtsi2sd %eax, %xmm0
cvtsi2sd %r15d, %xmm1
callq pow
movsd %xmm0, 32(%rsp) # 8-byte Spill
movsd %xmm0, 16(%rsp)
movq 24(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, 40(%rsp) # 8-byte Spill
movq 32(%r14), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r13
movslq 12(%rsp), %rax
shlq $2, %rax
movq %rax, %xmm0
punpckldq .LCPI1_0(%rip), %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
subpd .LCPI1_1(%rip), %xmm0
movapd %xmm0, %xmm1
unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1]
addsd %xmm0, %xmm1
mulsd 32(%rsp), %xmm1 # 8-byte Folded Reload
cvttsd2si %xmm1, %rax
movq %rax, %rcx
sarq $63, %rcx
subsd .LCPI1_2(%rip), %xmm1
cvttsd2si %xmm1, %rdi
andq %rcx, %rdi
orq %rax, %rdi
callq malloc
movsd 16(%rsp), %xmm1 # xmm1 = mem[0],zero
movsd %xmm1, 32(%rsp) # 8-byte Spill
movsd .LCPI1_3(%rip), %xmm0 # xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
cvttsd2si %xmm0, %rcx
movq %rax, %r12
subsd .LCPI1_2(%rip), %xmm0
cvttsd2si %xmm0, %rax
movq %rcx, %rdi
sarq $63, %rdi
andq %rax, %rdi
orq %rcx, %rdi
callq malloc
movsd 32(%rsp), %xmm2 # 8-byte Reload
# xmm2 = mem[0],zero
movq %rax, %r15
xorpd %xmm0, %xmm0
ucomisd %xmm0, %xmm2
jbe .LBB1_7
# %bb.2: # %.preheader.preheader
movq %r12, %r14
jmp .LBB1_3
.p2align 4, 0x90
.LBB1_6: # %._crit_edge
# in Loop: Header=BB1_3 Depth=1
incl %ebp
xorps %xmm0, %xmm0
cvtsi2sd %ebp, %xmm0
movsd 16(%rsp), %xmm2 # xmm2 = mem[0],zero
ucomisd %xmm0, %xmm2
jbe .LBB1_7
.LBB1_3: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_5 Depth 2
cmpl $0, 12(%rsp)
jle .LBB1_6
# %bb.4: # %.lr.ph.preheader
# in Loop: Header=BB1_3 Depth=1
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_5: # %.lr.ph
# Parent Loop BB1_3 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI1_4(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss %xmm1, %xmm0
movss .LCPI1_5(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
addss %xmm1, %xmm0
movss %xmm0, (%r14)
addq $4, %r14
incl %ebx
cmpl 12(%rsp), %ebx
jl .LBB1_5
jmp .LBB1_6
.LBB1_14:
movl $.Lstr, %edi
callq puts@PLT
movl $.Lstr.1, %edi
callq puts@PLT
movl $.Lstr.2, %edi
callq puts@PLT
movl $.L.str.5, %edi
xorl %esi, %esi
xorl %eax, %eax
callq printf
movl $.L.str.6, %edi
xorl %esi, %esi
xorl %eax, %eax
callq printf
jmp .LBB1_15
.LBB1_7: # %._crit_edge52
movslq 12(%rsp), %rax
shlq $2, %rax
movq %rax, %xmm0
punpckldq .LCPI1_0(%rip), %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
subpd .LCPI1_1(%rip), %xmm0
movapd %xmm0, %xmm1
unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1]
addsd %xmm0, %xmm1
mulsd %xmm2, %xmm1
cvttsd2si %xmm1, %rax
movq %rax, %rcx
sarq $63, %rcx
movsd .LCPI1_2(%rip), %xmm0 # xmm0 = mem[0],zero
subsd %xmm0, %xmm1
cvttsd2si %xmm1, %rsi
andq %rcx, %rsi
orq %rax, %rsi
leaq 64(%rsp), %rdi
callq hipMalloc
movsd .LCPI1_3(%rip), %xmm1 # xmm1 = mem[0],zero
movsd 16(%rsp), %xmm0 # xmm0 = mem[0],zero
mulsd %xmm1, %xmm0
cvttsd2si %xmm0, %rax
subsd .LCPI1_2(%rip), %xmm0
cvttsd2si %xmm0, %rcx
movq %rax, %rsi
sarq $63, %rsi
andq %rcx, %rsi
orq %rax, %rsi
leaq 24(%rsp), %rdi
callq hipMalloc
leaq 56(%rsp), %rdi
movl $4, %esi
callq hipMalloc
leaq 48(%rsp), %rdi
movl $8, %esi
callq hipMalloc
movslq 12(%rsp), %rax
shlq $2, %rax
movq %rax, %xmm0
punpckldq .LCPI1_0(%rip), %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
subpd .LCPI1_1(%rip), %xmm0
movapd %xmm0, %xmm1
unpckhpd %xmm0, %xmm1 # xmm1 = xmm1[1],xmm0[1]
addsd %xmm0, %xmm1
mulsd 16(%rsp), %xmm1
cvttsd2si %xmm1, %rax
movq 64(%rsp), %rdi
movq %rax, %rcx
sarq $63, %rcx
subsd .LCPI1_2(%rip), %xmm1
cvttsd2si %xmm1, %rdx
andq %rcx, %rdx
orq %rax, %rdx
movq %r12, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movsd .LCPI1_3(%rip), %xmm1 # xmm1 = mem[0],zero
mulsd 16(%rsp), %xmm1
movapd %xmm1, %xmm0
subsd .LCPI1_2(%rip), %xmm0
cvttsd2si %xmm0, %rax
cvttsd2si %xmm1, %rcx
movq %rcx, %rdx
sarq $63, %rdx
andq %rax, %rdx
orq %rcx, %rdx
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq 56(%rsp), %rdi
leaq 12(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movq 48(%rsp), %rdi
leaq 16(%rsp), %rsi
movl $8, %edx
movl $1, %ecx
callq hipMemcpy
movl 12(%rsp), %esi
movsd 16(%rsp), %xmm0 # xmm0 = mem[0],zero
movl $.L.str, %edi
movq 40(%rsp), %r14 # 8-byte Reload
movl %r14d, %edx
movl %r13d, %ecx
movb $1, %al
callq printf
movl %r14d, %edi
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %rdi
movl %r13d, %edx
orq %rax, %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_9
# %bb.8:
movq 64(%rsp), %rax
movq 24(%rsp), %rcx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq %rax, 152(%rsp)
movq %rcx, 144(%rsp)
movq %rdx, 136(%rsp)
movq %rsi, 128(%rsp)
leaq 152(%rsp), %rax
movq %rax, 160(%rsp)
leaq 144(%rsp), %rax
movq %rax, 168(%rsp)
leaq 136(%rsp), %rax
movq %rax, 176(%rsp)
leaq 128(%rsp), %rax
movq %rax, 184(%rsp)
leaq 112(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 88(%rsp), %rdx
leaq 80(%rsp), %rcx
callq __hipPopCallConfiguration
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
movq 96(%rsp), %rcx
movl 104(%rsp), %r8d
leaq 160(%rsp), %r9
movl $_Z16calculations_invPfS_PiPl, %edi
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_9:
movq 24(%rsp), %rsi
movsd 16(%rsp), %xmm0 # xmm0 = mem[0],zero
mulsd .LCPI1_3(%rip), %xmm0
cvttsd2si %xmm0, %rax
movq %rax, %rcx
sarq $63, %rcx
subsd .LCPI1_2(%rip), %xmm0
cvttsd2si %xmm0, %rdx
andq %rcx, %rdx
orq %rax, %rdx
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movss (%r15), %xmm1 # xmm1 = mem[0],zero,zero,zero
movsd 16(%rsp), %xmm0 # xmm0 = mem[0],zero
xorpd %xmm2, %xmm2
ucomisd %xmm2, %xmm0
jbe .LBB1_10
# %bb.11: # %.lr.ph57.preheader
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_12: # %.lr.ph57
# =>This Inner Loop Header: Depth=1
movss (%r15,%rax,4), %xmm3 # xmm3 = mem[0],zero,zero,zero
incq %rax
xorps %xmm2, %xmm2
cvtsi2sd %eax, %xmm2
minss %xmm1, %xmm3
ucomisd %xmm2, %xmm0
movaps %xmm3, %xmm1
ja .LBB1_12
jmp .LBB1_13
.LBB1_10:
movaps %xmm1, %xmm3
.LBB1_13: # %._crit_edge58
movss %xmm3, 40(%rsp) # 4-byte Spill
callq clock
subq 72(%rsp), %rax # 8-byte Folded Reload
xorps %xmm1, %xmm1
cvtsi2sd %rax, %xmm1
divsd .LCPI1_6(%rip), %xmm1
movss 40(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $2, %al
callq printf
movq 64(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 56(%rsp), %rdi
callq hipFree
movq 48(%rsp), %rdi
callq hipFree
movq %r12, %rdi
callq free
movq %r15, %rdi
callq free
.LBB1_15:
xorl %eax, %eax
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16calculations_invPfS_PiPl, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16calculations_invPfS_PiPl,@object # @_Z16calculations_invPfS_PiPl
.section .rodata,"a",@progbits
.globl _Z16calculations_invPfS_PiPl
.p2align 3, 0x0
_Z16calculations_invPfS_PiPl:
.quad _Z31__device_stub__calculations_invPfS_PiPl
.size _Z16calculations_invPfS_PiPl, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Liczba block\303\263w : n [%d] Liczba iteracji [%f] , bloki [%d] watki : [%d]"
.size .L.str, 72
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "szukane minimum : %f - czas : %f \n "
.size .L.str.1, 37
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Blocks - is number of used GPU blocks...max is %d\n"
.size .L.str.5, 51
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Threads- is number of used GPU threads per one block ... max is %d \n"
.size .L.str.6, 70
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z16calculations_invPfS_PiPl"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Invalid program parameters plese type /main2 N I Blocks Threads where "
.size .Lstr, 71
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "N - is problem dimension"
.size .Lstr.1, 25
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "I - is number of iteratios"
.size .Lstr.2, 27
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__calculations_invPfS_PiPl
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16calculations_invPfS_PiPl
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
/**
* Quickhull.cu
* Author: Michael Gruesen
* A quickhull implementation for NVIDIA GPGPU Compute Capability 2.0
*
* Serial QSort Code Adapted
* from : Aashish Barnwal
* source: http://www.geeksforgeeks.org/iterative-quick-sort/
*
* Parallel QSort Code Adapted
 * from : Serial QSort Code
**/
// 2-D point with integer coordinates; equality is exact component-wise
// comparison (no tolerance needed since coordinates are ints).
struct Point {
  int x, y;
  // Parameter taken by value (8 bytes — cheap to copy).
  bool operator==(Point a){ return x == a.x && y == a.y;}
};
// Directed hull edge from endpoint p to endpoint q; two edges are equal
// only when both endpoints match in order.
struct Edge {
  Point p;
  Point q;
  bool operator==(Edge a){return p == a.p && q == a.q;}
};
// GPU kernel entry point for the parallel quickhull — currently an empty
// stub (the device implementation has not been written). Launched by
// launchQuickHull() with n * sizeof(Edge) bytes of dynamic shared memory.
__global__ void quickhull(Point* d_input, Point* d_output, int n){
}
/**
 * Host-side launcher for the quickhull kernel.
 *
 * d_input/d_output must be device pointers; n is the point count and sizes
 * the dynamic shared-memory allocation (n * sizeof(Edge) bytes per block).
 * Any launch or execution error is reported on stderr.
 *
 * Fix vs. original: the execution configuration is <<<grid, block, smem>>>,
 * but the original passed <<<dimBlock, dimGrid, ...>>> — harmless only
 * because both dim3s default to (1,1,1). Also adds cudaGetLastError(),
 * since launch-configuration errors are not reported by the launch itself.
 */
__host__ void launchQuickHull(Point* d_input, Point* d_output, int n){
  dim3 dimGrid;   // default (1,1,1) — placeholder configuration
  dim3 dimBlock;  // default (1,1,1)
  size_t sharedSize = n * sizeof(Edge);  // dynamic shared memory per block
  cudaError_t cErr;
  quickhull<<< dimGrid, dimBlock, sharedSize >>>(d_input, d_output, n);
  // Bad launch configuration surfaces here, not at the launch statement.
  cErr = cudaGetLastError();
  if (cErr != cudaSuccess) fprintf(stderr, "%s\n", cudaGetErrorString(cErr));
  // Asynchronous execution errors surface at the next synchronizing call.
  cErr = cudaDeviceSynchronize();
  if (cErr != cudaSuccess) fprintf(stderr, "%s\n", cudaGetErrorString(cErr));
}
/**
 * Allocates device buffers for n input Points and n output Edges, then
 * uploads the host input. Errors are reported on stderr.
 *
 * WARNING(interface): d_input and d_output are passed BY VALUE, so the
 * device addresses written by cudaMalloc never reach the caller. Callers
 * would need to pass Point** / Edge** for this to be useful; the signature
 * is preserved here for backward compatibility (the only call site, in
 * main(), is commented out).
 *
 * Fix vs. original: the output buffer holds Edge records (two Points each),
 * but was allocated only n*sizeof(Point) — half the required size.
 */
void allocate(Point* d_input, Point* h_input, Edge* d_output, int n){
  size_t memSize = n*sizeof(Point);
  cudaError_t err;
  err = cudaMalloc((void**)&d_input, memSize);
  if (err != cudaSuccess) fprintf(stderr, "%s\n", cudaGetErrorString(err));
  err = cudaMalloc((void**)&d_output, n*sizeof(Edge));
  if (err != cudaSuccess) fprintf(stderr, "%s\n", cudaGetErrorString(err));
  err = cudaMemcpy(d_input, h_input, memSize, cudaMemcpyHostToDevice);
  if (err != cudaSuccess) fprintf(stderr, "%s\n", cudaGetErrorString(err));
}
// Releases the device buffers allocated by allocate(). The cudaFree return
// codes are ignored, matching the file's best-effort cleanup style.
void deallocate(Point* d_input, Point*d_output){
  cudaFree(d_input);
  cudaFree(d_output);
}
// Debug helper: writes the n hull edges to stderr as "px,py->qx,qy" records,
// comma-separated, with a newline after the final edge.
void printHull(Edge* out, int n){
  for (int idx = 0; idx < n; ++idx){
    const Edge* e = &out[idx];
    const char* sep = (idx + 1 == n) ? "\n" : ", ";
    fprintf(stderr, "%d,%d->%d,%d%s", e->p.x, e->p.y, e->q.x, e->q.y, sep);
  }
}
// Exchanges the two Point values behind the given pointers.
void swap(Point* a, Point* b){
  Point held = *b;
  *b = *a;
  *a = held;
}
/**
* Modification: Added extra conditional to allow
* sorting by x then y if a.x == b.x
**/
/**
 * Lomuto partition for quicksort over Points, ordering by x and breaking
 * ties by y (both ascending). input[r] is the pivot; the function places
 * it at its final sorted position and returns that index.
 */
int partition(Point* input, int l, int r){
  const int px = input[r].x;
  const int py = input[r].y;
  int store = l - 1;  // last index of the "strictly before pivot" region
  for (int j = l; j < r; j++){
    // Strictly before the pivot in (x, then y) lexicographic order?
    const bool before = (input[j].x < px) ||
                        (input[j].x == px && input[j].y < py);
    if (before){
      store++;
      swap(&input[store], &input[j]);
    }
  }
  swap(&input[store+1], &input[r]);
  return store+1;
}
/**
* Code for iterative serial quicksort comes from
* http://www.geeksforgeeks.org/iterative-quick-sort/
* Author: Aashish Barnwal
**/
/**
 * Iterative quicksort over input[l..r] (inclusive), ordering by x then y
 * via partition(). Recursion is replaced by an explicit stack of index
 * pairs; r - l + 1 slots suffice for this push scheme.
 *
 * Fix vs. original: the bookkeeping stack was a C99-style variable-length
 * array (`int stack[r - l + 1]`), a non-standard extension in C++ that can
 * overflow the call stack for large inputs (and has negative size when
 * r < l). It is now heap-allocated and guarded.
 */
void quickSortIterative(Point* input, int l, int r){
  if (l >= r) return;  // zero- or one-element range: nothing to sort
  int* stack = (int*)malloc((size_t)(r - l + 1) * sizeof(int));
  if (stack == NULL){
    fprintf(stderr, "quickSortIterative: out of memory\n");
    return;
  }
  int top = -1;
  stack[++top] = l;
  stack[++top] = r;
  while (top >= 0){
    r = stack[top--];
    l = stack[top--];
    int p = partition(input, l, r);
    if (p-1 > l){          // left sub-range has more than one element
      stack[++top] = l;
      stack[++top] = p-1;
    }
    if (p+1 < r){          // right sub-range has more than one element
      stack[++top] = p+1;
      stack[++top] = r;
    }
  }
  free(stack);
}
// Debug helper: prints each point as an "x y" line on stderr.
void checkSort(Point* in, int n){
  const Point* end = in + n;
  for (const Point* p = in; p < end; ++p){
    fprintf(stderr, "%d %d\n", p->x, p->y);
  }
}
// Signed cross product of vectors (a->b) and (a->c): positive when c lies
// to the left of the directed line a->b, negative to the right, zero when
// collinear. Used by the hull routines as a side-of-line / distance proxy.
int computeDistance(Point a, Point b, Point c){
  const int abx = b.x - a.x;
  const int aby = b.y - a.y;
  const int acx = c.x - a.x;
  const int acy = c.y - a.y;
  return abx * acy - aby * acx;
}
/**
 * Replaces edge `old` (p->q) in `hull` with the two edges p->c and c->q,
 * keeping the edge list in traversal order.
 *
 * hull         ordered edge array, currently holding hullCounter edges
 * c            new hull vertex splitting edge `old`
 * old          the edge being split (matched by exact endpoint equality)
 * hullCounter  current edge count
 * returns      the new edge count (hullCounter + 1)
 *
 * NOTE(review): if `old` is not found, insertIdx == hullCounter and the two
 * writes land at hull[hullCounter] / hull[hullCounter+1] — appending instead
 * of splitting; presumably callers always pass an edge that is present.
 */
int insert(Edge* hull, Point c, Edge old, int hullCounter){
  //printHull(hull, hullCounter);
  //fprintf(stderr, "Inserting %d,%d\n", c.x, c.y);
  int insertIdx;
  // Locate the edge being split.
  for(insertIdx = 0; insertIdx < hullCounter; insertIdx++){
    if (hull[insertIdx] == old) break;
  }
  // Shift everything after the split point one slot right to open a gap.
  for(int i = hullCounter; i > insertIdx + 1; i--){
    hull[i] = hull[i - 1];
  }
  Edge e;
  e.q = old.q;           // second half: c -> old.q
  e.p = c;
  old.q = c;             // first half: old.p -> c
  hull[insertIdx] = old;
  hull[insertIdx + 1] = e;
  //printHull(hull, hullCounter+1);
  return ++hullCounter;
}
/**
 * Recursive "findHull" step of serial quickhull: takes the candidate set
 * lying outside edge a->b, promotes the farthest point c to the hull
 * (splitting a->b into a->c and c->b via insert()), partitions the set
 * against the two new edges, and recurses on each side.
 *
 * Returns the updated hull edge count.
 *
 * Fixes vs. original:
 *  - the side buffers were sized (setCounter-2) elements, which underflows
 *    for setCounter < 2 and can be one element short in the worst case;
 *    they are now sized setCounter elements (a safe upper bound).
 *  - malloc results are checked before use.
 */
int serialFindHull(Point* set, Point a, Point b, Edge* hull, int setCounter, int setMaxIdx, int hullCounter){
  if (setCounter == 0){
    return hullCounter;  // no points outside this edge: recursion ends
  }
  Point c = set[setMaxIdx];  // farthest point from segment a-b joins the hull
  Edge old;
  old.p = a;
  old.q = b;
  hullCounter = insert(hull, c, old, hullCounter);
  // Either side can receive at most setCounter points, so size both buffers
  // to that upper bound.
  Point* s1 = (Point*)malloc((size_t)setCounter*sizeof(Point));
  Point* s2 = (Point*)malloc((size_t)setCounter*sizeof(Point));
  if (s1 == NULL || s2 == NULL){
    fprintf(stderr, "serialFindHull: out of memory\n");
    free(s1);
    free(s2);
    return hullCounter;
  }
  int s1counter = 0;
  int s1MaxIdx = -1;
  int s1MaxVal = 0;
  int s2counter = 0;
  int s2MaxIdx = -1;
  int s2MaxVal = 0;
  for (int i = 0; i < setCounter; i++){
    Point p = set[i];
    int res;
    if ((res = computeDistance(a, c, p)) > 0){
      // p lies strictly outside the new edge a->c
      s1[s1counter++] = p;
      if (res > s1MaxVal){
        s1MaxIdx = s1counter - 1;
        s1MaxVal = res;
      }
    }
    else if ((res = computeDistance(c, b, p)) > 0){
      // p lies strictly outside the new edge c->b
      s2[s2counter++] = p;
      if (res > s2MaxVal){
        s2MaxIdx = s2counter - 1;
        s2MaxVal = res;
      }
    }
  }
  hullCounter = serialFindHull(s1, a, c, hull, s1counter, s1MaxIdx, hullCounter);
  hullCounter = serialFindHull(s2, c, b, hull, s2counter, s2MaxIdx, hullCounter);
  free(s1);
  free(s2);
  return hullCounter;
}
/**
 * Serial quickhull over a point set in[0..n-1] that has been sorted by x
 * (ties by y), so in[0] and in[n-1] are the extreme points of the baseline.
 * Writes the hull edges into `out` and returns the number of edges.
 * Requires n >= 2.
 *
 * Fixes vs. original:
 *  - interior points are indices 1..n-2 inclusive, but the loop stopped at
 *    i < n-2 and never classified in[n-2]; the bound is now i < n-1.
 *  - the side buffers were sized (n-2) elements (negative for n < 2 and
 *    potentially one short); they are now sized n elements and checked.
 */
int serialHull(Point* in, Edge* out, int n){
  int hullCounter = 0;
  Edge a = {in[0], in[n-1]};
  a.p = in[0];
  a.q = in[n-1];
  out[hullCounter++] = a;  // baseline edge: leftmost -> rightmost
  Point* s1 = (Point*)malloc((size_t)n*sizeof(Point));
  Point* s2 = (Point*)malloc((size_t)n*sizeof(Point));
  if (s1 == NULL || s2 == NULL){
    fprintf(stderr, "serialHull: out of memory\n");
    free(s1);
    free(s2);
    return hullCounter;
  }
  int s1counter = 0;
  int s1MaxIdx = 0;
  int s1MaxVal = 0;
  int s2counter = 0;
  int s2MaxIdx = 0;
  int s2MaxVal = 0;
  for (int i = 1; i < n-1; i++){  // classify every interior point
    Point p = in[i];
    int res;
    if ((res = computeDistance(in[0], in[n-1], p)) > 0){
      s1[s1counter++] = p;  // left of baseline in[0] -> in[n-1]
      if (res > s1MaxVal) {
        s1MaxIdx = s1counter - 1;
        s1MaxVal = res;
      }
    }
    else if ((res = computeDistance(in[n-1], in[0], p)) > 0){
      s2[s2counter++] = p;  // left of reversed baseline in[n-1] -> in[0]
      if (res > s2MaxVal){
        s2MaxIdx = s2counter - 1;
        s2MaxVal = res;
      }
    }
  }
  hullCounter = serialFindHull(s1, in[0], in[n-1], out, s1counter, s1MaxIdx, hullCounter);
  a.p = in[n-1];
  a.q = in[0];
  out[hullCounter++] = a;  // return edge: rightmost -> leftmost
  hullCounter = serialFindHull(s2, in[n-1], in[0], out, s2counter, s2MaxIdx, hullCounter);
  free(s1);
  free(s2);
  return hullCounter;
}
/**
 * Sorts the input points in place, runs the serial quickhull writing edges
 * into `out`, and reports the elapsed time (measured with CUDA events)
 * on stderr.
 *
 * Fixes vs. original: cudaEventElapsedTime was called without first
 * synchronizing on the stop event, so the reading could be taken before
 * the event completed; the events were also never destroyed (leak).
 */
void doSerialQuickHull(Point* in, Edge* out, int n){
  cudaEvent_t start, stop;
  float time;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start, 0);
  quickSortIterative(in, 0, n-1);
  int hulls = serialHull(in, out, n);
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);  // ensure stop has completed before reading
  cudaEventElapsedTime(&time, start, stop);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  (void)hulls;  // edge count kept for debugging, e.g. printHull(out, hulls)
  fprintf(stderr, "serial quickhull runtime: %f ms\n", time);
}
/**
 * Entry point: reads a point count followed by that many "x y" pairs from
 * the file named by argv[1], then runs the timed serial quickhull.
 * Returns 0 on success, 1 on usage/IO/allocation errors.
 *
 * Fixes vs. original: fopen/fscanf/malloc results are now checked, the
 * input file is closed, and all host buffers are freed.
 */
int main(int argc, char** argv){
  if (argc < 2){
    fprintf(stderr, "usage: %s <input-file>\n", argv[0]);
    return 1;
  }
  //get input passed as arg
  FILE* input = fopen(argv[1], "r");
  if (input == NULL){
    fprintf(stderr, "could not open %s\n", argv[1]);
    return 1;
  }
  //get number of points
  int numPoints;
  if (fscanf(input, "%d", &numPoints) != 1 || numPoints <= 0){
    fprintf(stderr, "invalid point count\n");
    fclose(input);
    return 1;
  }
  size_t memSize = numPoints * sizeof(Point);
  size_t outSize = numPoints * sizeof(Edge);
  //host input/output
  Point* h_input = (Point*)malloc(memSize);
  Edge* h_output = (Edge*)malloc(outSize);  // reserved for the (disabled) GPU path
  Edge* h_ref = (Edge*)malloc(outSize);     // serial (reference) hull output
  if (h_input == NULL || h_output == NULL || h_ref == NULL){
    fprintf(stderr, "out of memory\n");
    free(h_input); free(h_output); free(h_ref);
    fclose(input);
    return 1;
  }
  //initialize input
  for (int i = 0; i < numPoints; i++){
    Point p;
    if (fscanf(input, "%d %d", &p.x, &p.y) != 2){
      fprintf(stderr, "malformed point at index %d\n", i);
      free(h_input); free(h_output); free(h_ref);
      fclose(input);
      return 1;
    }
    h_input[i] = p;
  }
  fclose(input);
  fprintf(stderr, "Read input\n");
  doSerialQuickHull(h_input, h_ref, numPoints);
  fprintf(stderr, "Quick Hull completed\n");
  // GPU path (allocate / launchQuickHull / copy-back / deallocate) is
  // intentionally disabled in this revision:
  //allocate(d_input, h_input, d_output, numPoints);
  //launchQuickHull(d_input, d_output, numPoints);
  //cudaMemcpy(h_output, d_output, numPoints*sizeof(Edge), cudaMemcpyDeviceToHost);
  //deallocate(d_input, d_output);
  free(h_input);
  free(h_output);
  free(h_ref);
  return 0;
}
|
code for sm_80
Function : _Z9quickhullP5PointS0_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
/**
* Quickhull.cu
* Author: Michael Gruesen
* A quickhull implementation for NVIDIA GPGPU Compute Capability 2.0
*
* Serial QSort Code Adapted
* from : Aashish Barnwal
* source: http://www.geeksforgeeks.org/iterative-quick-sort/
*
* Parallel QSort Code Adapted
 * from : Serial QSort Code
**/
struct Point {
int x, y;
bool operator==(Point a){ return x == a.x && y == a.y;}
};
struct Edge {
Point p;
Point q;
bool operator==(Edge a){return p == a.p && q == a.q;}
};
__global__ void quickhull(Point* d_input, Point* d_output, int n){
}
/**
 * Host-side launcher for the quickhull kernel.
 *
 * d_input/d_output must be device pointers; n is the point count and sizes
 * the dynamic shared-memory allocation (n * sizeof(Edge) bytes per block).
 * Any launch or execution error is reported on stderr.
 *
 * Fix vs. original: the execution configuration is <<<grid, block, smem>>>,
 * but the original passed <<<dimBlock, dimGrid, ...>>> — harmless only
 * because both dim3s default to (1,1,1). Also adds cudaGetLastError(),
 * since launch-configuration errors are not reported by the launch itself.
 */
__host__ void launchQuickHull(Point* d_input, Point* d_output, int n){
  dim3 dimGrid;   // default (1,1,1) — placeholder configuration
  dim3 dimBlock;  // default (1,1,1)
  size_t sharedSize = n * sizeof(Edge);  // dynamic shared memory per block
  cudaError_t cErr;
  quickhull<<< dimGrid, dimBlock, sharedSize >>>(d_input, d_output, n);
  // Bad launch configuration surfaces here, not at the launch statement.
  cErr = cudaGetLastError();
  if (cErr != cudaSuccess) fprintf(stderr, "%s\n", cudaGetErrorString(cErr));
  // Asynchronous execution errors surface at the next synchronizing call.
  cErr = cudaDeviceSynchronize();
  if (cErr != cudaSuccess) fprintf(stderr, "%s\n", cudaGetErrorString(cErr));
}
void allocate(Point* d_input, Point* h_input, Edge* d_output, int n){
size_t memSize = n*sizeof(Point);
cudaMalloc((void**)&d_input, memSize);
cudaMalloc((void**)&d_output, memSize);
cudaMemcpy(d_input, h_input, memSize, cudaMemcpyHostToDevice);
}
void deallocate(Point* d_input, Point*d_output){
cudaFree(d_input);
cudaFree(d_output);
}
void printHull(Edge* out, int n){
for (int i = 0; i < n; i++)
fprintf(stderr, "%d,%d->%d,%d%s", out[i].p.x, out[i].p.y, out[i].q.x, out[i].q.y, ((i + 1 == n) ? "\n" : ", "));
}
void swap(Point* a, Point* b){
Point temp = *a;
*a = *b;
*b = temp;
}
/**
* Modification: Added extra conditional to allow
* sorting by x then y if a.x == b.x
**/
int partition(Point* input, int l, int r){
int x = input[r].x;
int y = input[r].y;
int i = (l - 1);
for (int j = l; j <= r-1; j++){
//was : if(input[j].x <= x)
if(input[j].x < x){
i++;
swap(&input[i], &input[j]);
}
else if (input[j].x == x){
if (input[j].y < y){
i++;
swap(&input[i], &input[j]);
}
}
}
swap(&input[i+1], &input[r]);
return i+1;
}
/**
* Code for iterative serial quicksort comes from
* http://www.geeksforgeeks.org/iterative-quick-sort/
* Author: Aashish Barnwal
**/
void quickSortIterative(Point* input, int l, int r){
int stack[r - l + 1];
int top = -1;
stack[++top] = l;
stack[++top] = r;
while (top >=0){
r = stack[top--];
l = stack[top--];
int p = partition(input, l, r);
if (p-1 > l){
stack[++top] = l;
stack[++top] = p-1;
}
if (p+1 < r){
stack[++top] = p+1;
stack[++top] = r;
}
}
}
void checkSort(Point* in, int n){
for(int i = 0; i < n; i++){
fprintf(stderr, "%d %d\n", in[i].x, in[i].y);
}
}
int computeDistance(Point a, Point b, Point c){
return (b.x - a.x)*(c.y-a.y)-(b.y-a.y)*(c.x-a.x);
}
int insert(Edge* hull, Point c, Edge old, int hullCounter){
//printHull(hull, hullCounter);
//fprintf(stderr, "Inserting %d,%d\n", c.x, c.y);
int insertIdx;
for(insertIdx = 0; insertIdx < hullCounter; insertIdx++){
if (hull[insertIdx] == old) break;
}
for(int i = hullCounter; i > insertIdx + 1; i--){
hull[i] = hull[i - 1];
}
Edge e;
e.q = old.q;
e.p = c;
old.q = c;
hull[insertIdx] = old;
hull[insertIdx + 1] = e;
//printHull(hull, hullCounter+1);
return ++hullCounter;
}
/**
 * Recursive "findHull" step of serial quickhull: takes the candidate set
 * lying outside edge a->b, promotes the farthest point c to the hull
 * (splitting a->b into a->c and c->b via insert()), partitions the set
 * against the two new edges, and recurses on each side.
 *
 * Returns the updated hull edge count.
 *
 * Fixes vs. original:
 *  - the side buffers were sized (setCounter-2) elements, which underflows
 *    for setCounter < 2 and can be one element short in the worst case;
 *    they are now sized setCounter elements (a safe upper bound).
 *  - malloc results are checked before use.
 */
int serialFindHull(Point* set, Point a, Point b, Edge* hull, int setCounter, int setMaxIdx, int hullCounter){
  if (setCounter == 0){
    return hullCounter;  // no points outside this edge: recursion ends
  }
  Point c = set[setMaxIdx];  // farthest point from segment a-b joins the hull
  Edge old;
  old.p = a;
  old.q = b;
  hullCounter = insert(hull, c, old, hullCounter);
  // Either side can receive at most setCounter points, so size both buffers
  // to that upper bound.
  Point* s1 = (Point*)malloc((size_t)setCounter*sizeof(Point));
  Point* s2 = (Point*)malloc((size_t)setCounter*sizeof(Point));
  if (s1 == NULL || s2 == NULL){
    fprintf(stderr, "serialFindHull: out of memory\n");
    free(s1);
    free(s2);
    return hullCounter;
  }
  int s1counter = 0;
  int s1MaxIdx = -1;
  int s1MaxVal = 0;
  int s2counter = 0;
  int s2MaxIdx = -1;
  int s2MaxVal = 0;
  for (int i = 0; i < setCounter; i++){
    Point p = set[i];
    int res;
    if ((res = computeDistance(a, c, p)) > 0){
      // p lies strictly outside the new edge a->c
      s1[s1counter++] = p;
      if (res > s1MaxVal){
        s1MaxIdx = s1counter - 1;
        s1MaxVal = res;
      }
    }
    else if ((res = computeDistance(c, b, p)) > 0){
      // p lies strictly outside the new edge c->b
      s2[s2counter++] = p;
      if (res > s2MaxVal){
        s2MaxIdx = s2counter - 1;
        s2MaxVal = res;
      }
    }
  }
  hullCounter = serialFindHull(s1, a, c, hull, s1counter, s1MaxIdx, hullCounter);
  hullCounter = serialFindHull(s2, c, b, hull, s2counter, s2MaxIdx, hullCounter);
  free(s1);
  free(s2);
  return hullCounter;
}
/**
 * Serial quickhull over a point set in[0..n-1] that has been sorted by x
 * (ties by y), so in[0] and in[n-1] are the extreme points of the baseline.
 * Writes the hull edges into `out` and returns the number of edges.
 * Requires n >= 2.
 *
 * Fixes vs. original:
 *  - interior points are indices 1..n-2 inclusive, but the loop stopped at
 *    i < n-2 and never classified in[n-2]; the bound is now i < n-1.
 *  - the side buffers were sized (n-2) elements (negative for n < 2 and
 *    potentially one short); they are now sized n elements and checked.
 */
int serialHull(Point* in, Edge* out, int n){
  int hullCounter = 0;
  Edge a = {in[0], in[n-1]};
  a.p = in[0];
  a.q = in[n-1];
  out[hullCounter++] = a;  // baseline edge: leftmost -> rightmost
  Point* s1 = (Point*)malloc((size_t)n*sizeof(Point));
  Point* s2 = (Point*)malloc((size_t)n*sizeof(Point));
  if (s1 == NULL || s2 == NULL){
    fprintf(stderr, "serialHull: out of memory\n");
    free(s1);
    free(s2);
    return hullCounter;
  }
  int s1counter = 0;
  int s1MaxIdx = 0;
  int s1MaxVal = 0;
  int s2counter = 0;
  int s2MaxIdx = 0;
  int s2MaxVal = 0;
  for (int i = 1; i < n-1; i++){  // classify every interior point
    Point p = in[i];
    int res;
    if ((res = computeDistance(in[0], in[n-1], p)) > 0){
      s1[s1counter++] = p;  // left of baseline in[0] -> in[n-1]
      if (res > s1MaxVal) {
        s1MaxIdx = s1counter - 1;
        s1MaxVal = res;
      }
    }
    else if ((res = computeDistance(in[n-1], in[0], p)) > 0){
      s2[s2counter++] = p;  // left of reversed baseline in[n-1] -> in[0]
      if (res > s2MaxVal){
        s2MaxIdx = s2counter - 1;
        s2MaxVal = res;
      }
    }
  }
  hullCounter = serialFindHull(s1, in[0], in[n-1], out, s1counter, s1MaxIdx, hullCounter);
  a.p = in[n-1];
  a.q = in[0];
  out[hullCounter++] = a;  // return edge: rightmost -> leftmost
  hullCounter = serialFindHull(s2, in[n-1], in[0], out, s2counter, s2MaxIdx, hullCounter);
  free(s1);
  free(s2);
  return hullCounter;
}
/**
 * Sorts the input points in place, runs the serial quickhull writing edges
 * into `out`, and reports the elapsed time (measured with CUDA events)
 * on stderr.
 *
 * Fixes vs. original: cudaEventElapsedTime was called without first
 * synchronizing on the stop event, so the reading could be taken before
 * the event completed; the events were also never destroyed (leak).
 */
void doSerialQuickHull(Point* in, Edge* out, int n){
  cudaEvent_t start, stop;
  float time;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start, 0);
  quickSortIterative(in, 0, n-1);
  int hulls = serialHull(in, out, n);
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);  // ensure stop has completed before reading
  cudaEventElapsedTime(&time, start, stop);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  (void)hulls;  // edge count kept for debugging, e.g. printHull(out, hulls)
  fprintf(stderr, "serial quickhull runtime: %f ms\n", time);
}
/**
 * Entry point. Usage: <prog> <input-file>.
 * Input format: first line is the point count, then one "x y" pair per point.
 * Runs the serial quickhull reference implementation.
 */
int main(int argc, char** argv){
    // BUG FIX: argv[1] and fopen() were previously used unchecked.
    if (argc < 2){
        fprintf(stderr, "usage: %s <input-file>\n", argv[0]);
        return 1;
    }
    FILE* input = fopen(argv[1], "r");
    if (input == NULL){
        fprintf(stderr, "cannot open %s\n", argv[1]);
        return 1;
    }
    int numPoints = 0;
    if (fscanf(input, "%d", &numPoints) != 1 || numPoints <= 0){
        fprintf(stderr, "bad point count in %s\n", argv[1]);
        fclose(input);
        return 1;
    }
    size_t memSize = numPoints * sizeof(Point);
    size_t outSize = numPoints * sizeof(Edge);
    // Host input/output buffers.
    Point* h_input = (Point*)malloc(memSize);
    Edge* h_output = (Edge*)malloc(outSize);
    Edge* h_ref = (Edge*)malloc(outSize);
    for (int i = 0; i < numPoints; i++){
        Point p;
        fscanf(input, "%d %d", &p.x, &p.y);
        h_input[i] = p;
    }
    fclose(input);  // file handle was previously leaked
    fprintf(stderr, "Read input\n");
    doSerialQuickHull(h_input, h_ref, numPoints);
    fprintf(stderr, "Quick Hull completed\n");
    // GPU path (allocate/launchQuickHull/deallocate) intentionally disabled
    // in the original; see launchQuickHull for the device-side stub.
    free(h_input);  // heap buffers were previously leaked
    free(h_output);
    free(h_ref);
    return 0;
}
|
.file "tmpxft_000eaf2b_00000000-6_quickhull.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2075:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2075:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z8allocateP5PointS0_P4Edgei
.type _Z8allocateP5PointS0_P4Edgei, @function
_Z8allocateP5PointS0_P4Edgei:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $24, %rsp
.cfi_def_cfa_offset 48
movq %rdi, 8(%rsp)
movq %rsi, %rbp
movq %rdx, (%rsp)
movslq %ecx, %rbx
salq $3, %rbx
leaq 8(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movq %rsp, %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
addq $24, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z8allocateP5PointS0_P4Edgei, .-_Z8allocateP5PointS0_P4Edgei
.globl _Z10deallocateP5PointS0_
.type _Z10deallocateP5PointS0_, @function
_Z10deallocateP5PointS0_:
.LFB2061:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movq %rsi, %rbx
call cudaFree@PLT
movq %rbx, %rdi
call cudaFree@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _Z10deallocateP5PointS0_, .-_Z10deallocateP5PointS0_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%d,%d->%d,%d%s"
.LC1:
.string ", "
.LC2:
.string "\n"
.text
.globl _Z9printHullP4Edgei
.type _Z9printHullP4Edgei, @function
_Z9printHullP4Edgei:
.LFB2062:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L12
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %r13
movl %esi, %r12d
movq %rdi, %rbx
movl $0, %eax
leaq .LC1(%rip), %r15
leaq .LC0(%rip), %r14
jmp .L10
.L15:
movl 8(%rbx), %r9d
movl 4(%rbx), %r8d
movl (%rbx), %ecx
pushq %r15
.cfi_def_cfa_offset 72
movl 12(%rbx), %eax
pushq %rax
.cfi_def_cfa_offset 80
movq %r14, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
addq $16, %rbx
addq $16, %rsp
.cfi_def_cfa_offset 64
movl %ebp, %eax
.L10:
leal 1(%rax), %ebp
cmpl %r12d, %ebp
jne .L15
cltq
salq $4, %rax
addq %rax, %r13
movl 0(%r13), %ecx
leaq .LC2(%rip), %rax
pushq %rax
.cfi_def_cfa_offset 72
movl 12(%r13), %eax
pushq %rax
.cfi_def_cfa_offset 80
movl 8(%r13), %r9d
movl 4(%r13), %r8d
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
.cfi_restore 15
ret
.cfi_endproc
.LFE2062:
.size _Z9printHullP4Edgei, .-_Z9printHullP4Edgei
.globl _Z4swapP5PointS0_
.type _Z4swapP5PointS0_, @function
_Z4swapP5PointS0_:
.LFB2063:
.cfi_startproc
endbr64
movl (%rdi), %edx
movl 4(%rdi), %eax
movq (%rsi), %rcx
movq %rcx, (%rdi)
movl %edx, (%rsi)
movl %eax, 4(%rsi)
ret
.cfi_endproc
.LFE2063:
.size _Z4swapP5PointS0_, .-_Z4swapP5PointS0_
.globl _Z9partitionP5Pointii
.type _Z9partitionP5Pointii, @function
_Z9partitionP5Pointii:
.LFB2064:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdi, %r8
movslq %edx, %rax
leaq (%rdi,%rax,8), %r10
movl (%r10), %edi
movl 4(%r10), %r11d
leal -1(%rsi), %eax
cmpl %esi, %edx
jle .L18
movslq %esi, %r9
leaq (%r8,%r9,8), %rcx
subl %esi, %edx
addq %r9, %rdx
leaq (%r8,%rdx,8), %r9
jmp .L21
.L24:
addl $1, %eax
movslq %eax, %rdx
leaq (%r8,%rdx,8), %rdx
movl (%rdx), %ebx
movl 4(%rdx), %esi
movq (%rcx), %rbp
movq %rbp, (%rdx)
movl %ebx, (%rcx)
movl %esi, 4(%rcx)
.L20:
addq $8, %rcx
cmpq %r9, %rcx
je .L18
.L21:
movl (%rcx), %edx
cmpl %edi, %edx
jl .L24
jne .L20
cmpl %r11d, 4(%rcx)
jge .L20
addl $1, %eax
movslq %eax, %rdx
leaq (%r8,%rdx,8), %rdx
movl (%rdx), %ebp
movl 4(%rdx), %ebx
movq (%rcx), %r14
movq %r14, (%rdx)
movl %ebp, (%rcx)
movl %ebx, 4(%rcx)
jmp .L20
.L18:
movslq %eax, %rdx
leaq 8(%r8,%rdx,8), %rdx
movl (%rdx), %esi
movl 4(%rdx), %ecx
movq (%r10), %rdi
movq %rdi, (%rdx)
movl %esi, (%r10)
movl %ecx, 4(%r10)
addl $1, %eax
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _Z9partitionP5Pointii, .-_Z9partitionP5Pointii
.globl _Z18quickSortIterativeP5Pointii
.type _Z18quickSortIterativeP5Pointii, @function
_Z18quickSortIterativeP5Pointii:
.LFB2065:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %rsp, %rbp
.cfi_def_cfa_register 6
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $40, %rsp
.cfi_offset 15, -24
.cfi_offset 14, -32
.cfi_offset 13, -40
.cfi_offset 12, -48
.cfi_offset 3, -56
movq %rdi, -72(%rbp)
movq %fs:40, %rax
movq %rax, -56(%rbp)
xorl %eax, %eax
movl %edx, %eax
subl %esi, %eax
addl $1, %eax
cltq
leaq 15(,%rax,4), %rax
movq %rax, %rdi
andq $-16, %rdi
andq $-4096, %rax
movq %rsp, %rcx
subq %rax, %rcx
.L26:
cmpq %rcx, %rsp
je .L27
subq $4096, %rsp
orq $0, 4088(%rsp)
jmp .L26
.L27:
movq %rdi, %rax
andl $4095, %eax
subq %rax, %rsp
testq %rax, %rax
je .L28
orq $0, -8(%rsp,%rax)
.L28:
leaq 3(%rsp), %r15
movq %r15, %rax
shrq $2, %rax
andq $-4, %r15
movl %esi, 0(,%rax,4)
movl %edx, 4(,%rax,4)
movl $1, %r12d
jmp .L33
.L30:
movl %edx, (%rbx)
.L31:
addl $1, %eax
cmpl %r13d, %eax
jl .L37
testl %r12d, %r12d
js .L38
.L33:
movslq %r12d, %rax
leaq (%r15,%rax,4), %rbx
movl (%rbx), %r13d
movl -4(%rbx), %r14d
movl %r13d, %edx
movl %r14d, %esi
movq -72(%rbp), %rdi
call _Z9partitionP5Pointii
leal -1(%rax), %edx
cmpl %r14d, %edx
jg .L30
subl $2, %r12d
jmp .L31
.L37:
leal 1(%r12), %edx
movslq %edx, %rdx
movl %eax, (%r15,%rdx,4)
addl $2, %r12d
movslq %r12d, %rax
movl %r13d, (%r15,%rax,4)
jmp .L33
.L38:
movq -56(%rbp), %rax
subq %fs:40, %rax
jne .L39
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
.cfi_remember_state
.cfi_def_cfa 7, 8
ret
.L39:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2065:
.size _Z18quickSortIterativeP5Pointii, .-_Z18quickSortIterativeP5Pointii
.section .rodata.str1.1
.LC3:
.string "%d %d\n"
.text
.globl _Z9checkSortP5Pointi
.type _Z9checkSortP5Pointi, @function
_Z9checkSortP5Pointi:
.LFB2066:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L45
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,8), %r12
leaq .LC3(%rip), %rbp
.L42:
movl (%rbx), %ecx
movl 4(%rbx), %r8d
movq %rbp, %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
addq $8, %rbx
cmpq %r12, %rbx
jne .L42
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L45:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
ret
.cfi_endproc
.LFE2066:
.size _Z9checkSortP5Pointi, .-_Z9checkSortP5Pointi
.globl _Z15computeDistance5PointS_S_
.type _Z15computeDistance5PointS_S_, @function
_Z15computeDistance5PointS_S_:
.LFB2067:
.cfi_startproc
endbr64
movq %rdi, %r8
sarq $32, %r8
movl %esi, %eax
subl %edi, %eax
movq %rdx, %rcx
sarq $32, %rcx
subl %r8d, %ecx
imull %ecx, %eax
sarq $32, %rsi
subl %r8d, %esi
subl %edi, %edx
imull %edx, %esi
subl %esi, %eax
ret
.cfi_endproc
.LFE2067:
.size _Z15computeDistance5PointS_S_, .-_Z15computeDistance5PointS_S_
.globl _Z6insertP4Edge5PointS_i
.type _Z6insertP4Edge5PointS_i, @function
_Z6insertP4Edge5PointS_i:
.LFB2068:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
movq %rdi, %r9
movq %rsi, %r10
movq %rdx, %r11
movq %rsi, %rbp
sarq $32, %rbp
movl %ecx, %ebx
sarq $32, %rcx
testl %r8d, %r8d
jle .L55
movl %ecx, %r13d
movl %edx, %edi
movq %rdx, %rax
shrq $32, %rax
movl %eax, %r12d
movq %r9, %rdx
movl $0, %esi
jmp .L53
.L51:
addl $1, %esi
addq $16, %rdx
cmpl %esi, %r8d
je .L59
.L53:
cmpl %edi, (%rdx)
jne .L51
cmpl %r12d, 4(%rdx)
jne .L51
cmpl 8(%rdx), %ebx
jne .L51
cmpl 12(%rdx), %r13d
jne .L51
leal 1(%rsi), %eax
cmpl %eax, %r8d
jle .L50
movslq %r8d, %rax
salq $4, %rax
leaq (%r9,%rax), %rdx
leaq -16(%r9,%rax), %rax
leal -2(%r8), %edi
subl %esi, %edi
salq $4, %rdi
subq %rdi, %rax
.L54:
movdqu -16(%rdx), %xmm0
movups %xmm0, (%rdx)
subq $16, %rdx
cmpq %rax, %rdx
jne .L54
.L50:
movslq %esi, %rsi
salq $4, %rsi
movq %rbp, %rdx
salq $32, %rdx
movl %r10d, %eax
orq %rdx, %rax
movq %r11, (%r9,%rsi)
movq %rax, 8(%r9,%rsi)
leaq 16(%r9,%rsi), %rax
movl %r10d, (%rax)
movl %ebp, 4(%rax)
movl %ebx, 8(%rax)
movl %ecx, 12(%rax)
leal 1(%r8), %eax
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L59:
.cfi_restore_state
movl %r8d, %esi
jmp .L50
.L55:
movl $0, %esi
jmp .L50
.cfi_endproc
.LFE2068:
.size _Z6insertP4Edge5PointS_i, .-_Z6insertP4Edge5PointS_i
.globl _Z14serialFindHullP5PointS_S_P4Edgeiii
.type _Z14serialFindHullP5PointS_S_P4Edgeiii, @function
_Z14serialFindHullP5PointS_S_P4Edgeiii:
.LFB2069:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %rdx, 24(%rsp)
movq %rcx, 48(%rsp)
movl 128(%rsp), %r12d
testl %r8d, %r8d
jne .L71
.L61:
movl %r12d, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L71:
.cfi_restore_state
movq %rdi, %rbp
movq %rsi, %r13
movq %rdx, %rax
movq %rcx, %rdi
movl %r8d, %ebx
movslq %r9d, %r9
movq 0(%rbp,%r9,8), %r14
movl %r12d, %r8d
movq %rsi, %rdx
movq %rax, %rcx
movq %r14, %rsi
call _Z6insertP4Edge5PointS_i
movl %eax, 60(%rsp)
leal -2(%rbx), %r12d
movslq %r12d, %r12
salq $3, %r12
movq %r12, %rdi
call malloc@PLT
movq %rax, 16(%rsp)
movq %r12, %rdi
call malloc@PLT
movq %rax, 32(%rsp)
testl %ebx, %ebx
jle .L66
movq %rbp, %rbx
leaq 16(%rbp,%r12), %r15
movl $0, 44(%rsp)
movl $-1, 56(%rsp)
movl $0, 12(%rsp)
movl $0, 8(%rsp)
movl $-1, 40(%rsp)
movl $0, %r12d
jmp .L65
.L63:
movq %rbp, %rdx
movq 24(%rsp), %rsi
movq %r14, %rdi
call _Z15computeDistance5PointS_S_
testl %eax, %eax
jle .L64
movl 12(%rsp), %esi
leal 1(%rsi), %ecx
movslq %esi, %rdx
movq 32(%rsp), %rdi
movq %rbp, (%rdi,%rdx,8)
movl 44(%rsp), %edx
cmpl %edx, %eax
jg .L68
movl %ecx, 12(%rsp)
jmp .L64
.L67:
movl %eax, 8(%rsp)
movl %r12d, 40(%rsp)
movl %ecx, %r12d
.L64:
addq $8, %rbx
cmpq %r15, %rbx
je .L62
.L65:
movq (%rbx), %rbp
movq %rbp, %rdx
movq %r14, %rsi
movq %r13, %rdi
call _Z15computeDistance5PointS_S_
testl %eax, %eax
jle .L63
leal 1(%r12), %ecx
movslq %r12d, %rdx
movq 16(%rsp), %rsi
movq %rbp, (%rsi,%rdx,8)
movl 8(%rsp), %edi
cmpl %edi, %eax
jg .L67
movl %ecx, %r12d
jmp .L64
.L68:
movl %eax, 44(%rsp)
movl 12(%rsp), %eax
movl %eax, 56(%rsp)
movl %ecx, 12(%rsp)
jmp .L64
.L66:
movl $-1, 56(%rsp)
movl $0, 12(%rsp)
movl $-1, 40(%rsp)
movl $0, %r12d
.L62:
subq $8, %rsp
.cfi_def_cfa_offset 136
movl 68(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 144
movl 56(%rsp), %r9d
movl %r12d, %r8d
movq 64(%rsp), %rbx
movq %rbx, %rcx
movq %r14, %rdx
movq %r13, %rsi
movq 32(%rsp), %r15
movq %r15, %rdi
call _Z14serialFindHullP5PointS_S_P4Edgeiii
movl %eax, (%rsp)
movl 72(%rsp), %r9d
movl 28(%rsp), %r8d
movq %rbx, %rcx
movq 40(%rsp), %rdx
movq %r14, %rsi
movq 48(%rsp), %rbx
movq %rbx, %rdi
call _Z14serialFindHullP5PointS_S_P4Edgeiii
movl %eax, %r12d
movq %r15, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L61
.cfi_endproc
.LFE2069:
.size _Z14serialFindHullP5PointS_S_P4Edgeiii, .-_Z14serialFindHullP5PointS_S_P4Edgeiii
.globl _Z10serialHullP5PointP4Edgei
.type _Z10serialHullP5PointP4Edgei, @function
_Z10serialHullP5PointP4Edgei:
.LFB2070:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %r12
movq %rsi, %rbp
movl %edx, %r13d
movslq %edx, %rax
leaq -8(%rdi,%rax,8), %r14
movl (%rdi), %esi
movl 4(%rdi), %ecx
movl (%r14), %edx
movl 4(%r14), %eax
movl %esi, 0(%rbp)
movl %ecx, 4(%rbp)
movl %edx, 8(%rbp)
movl %eax, 12(%rbp)
leal -2(%r13), %r15d
movslq %r15d, %rbx
salq $3, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, 8(%rsp)
movq %rbx, %rdi
call malloc@PLT
movq %rax, 24(%rsp)
cmpl $1, %r15d
jle .L77
leaq 8(%r12), %rbx
leal -4(%r13), %eax
leaq 16(%r12,%rax,8), %r15
movl $0, 32(%rsp)
movl $0, 36(%rsp)
movl $0, 16(%rsp)
movl $0, 4(%rsp)
movl $0, 20(%rsp)
movl $0, %r13d
movq %rbp, 40(%rsp)
jmp .L76
.L74:
movq (%r12), %rsi
movq (%r14), %rdi
movq %rbp, %rdx
call _Z15computeDistance5PointS_S_
testl %eax, %eax
jle .L75
movl 16(%rsp), %esi
leal 1(%rsi), %ecx
movslq %esi, %rdx
movq 24(%rsp), %rdi
movq %rbp, (%rdi,%rdx,8)
movl 32(%rsp), %edx
cmpl %edx, %eax
jg .L79
movl %ecx, 16(%rsp)
jmp .L75
.L78:
movl %eax, 4(%rsp)
movl %r13d, 20(%rsp)
movl %ecx, %r13d
.L75:
addq $8, %rbx
cmpq %r15, %rbx
je .L82
.L76:
movq (%rbx), %rbp
movq (%r14), %rsi
movq (%r12), %rdi
movq %rbp, %rdx
call _Z15computeDistance5PointS_S_
testl %eax, %eax
jle .L74
leal 1(%r13), %ecx
movslq %r13d, %rdx
movq 8(%rsp), %rdi
movq %rbp, (%rdi,%rdx,8)
movl 4(%rsp), %edx
cmpl %edx, %eax
jg .L78
movl %ecx, %r13d
jmp .L75
.L79:
movl %eax, 32(%rsp)
movl 16(%rsp), %eax
movl %eax, 36(%rsp)
movl %ecx, 16(%rsp)
jmp .L75
.L82:
movq 40(%rsp), %rbp
.L73:
movq (%r14), %rdx
movq (%r12), %rsi
subq $8, %rsp
.cfi_def_cfa_offset 120
pushq $1
.cfi_def_cfa_offset 128
movl 36(%rsp), %r9d
movl %r13d, %r8d
movq %rbp, %rcx
movq 24(%rsp), %r13
movq %r13, %rdi
call _Z14serialFindHullP5PointS_S_P4Edgeiii
movl %eax, %edx
movl (%r14), %r8d
movl 4(%r14), %edi
movl (%r12), %esi
movl 4(%r12), %ecx
cltq
salq $4, %rax
addq %rbp, %rax
movl %r8d, (%rax)
movl %edi, 4(%rax)
movl %esi, 8(%rax)
movl %ecx, 12(%rax)
movq (%r12), %rax
movq (%r14), %rsi
addl $1, %edx
movl %edx, (%rsp)
movl 52(%rsp), %r9d
movl 32(%rsp), %r8d
movq %rbp, %rcx
movq %rax, %rdx
movq 40(%rsp), %r15
movq %r15, %rdi
call _Z14serialFindHullP5PointS_S_P4Edgeiii
movl %eax, %ebx
movq %r13, %rdi
call free@PLT
movq %r15, %rdi
call free@PLT
movl %ebx, %eax
addq $72, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L77:
.cfi_def_cfa_offset 112
movl $0, 36(%rsp)
movl $0, 16(%rsp)
movl $0, 20(%rsp)
movl $0, %r13d
jmp .L73
.cfi_endproc
.LFE2070:
.size _Z10serialHullP5PointP4Edgei, .-_Z10serialHullP5PointP4Edgei
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "serial quickhull runtime: %f ms\n"
.text
.globl _Z17doSerialQuickHullP5PointP4Edgei
.type _Z17doSerialQuickHullP5PointP4Edgei, @function
_Z17doSerialQuickHullP5PointP4Edgei:
.LFB2071:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbx
movq %rsi, %r12
movl %edx, %ebp
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
call cudaEventCreate@PLT
leaq 16(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 8(%rsp), %rdi
call cudaEventRecord@PLT
leal -1(%rbp), %edx
movl $0, %esi
movq %rbx, %rdi
call _Z18quickSortIterativeP5Pointii
movl %ebp, %edx
movq %r12, %rsi
movq %rbx, %rdi
call _Z10serialHullP5PointP4Edgei
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
leaq 4(%rsp), %rdi
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 4(%rsp), %xmm0
leaq .LC4(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $1, %eax
call __fprintf_chk@PLT
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L86
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L86:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2071:
.size _Z17doSerialQuickHullP5PointP4Edgei, .-_Z17doSerialQuickHullP5PointP4Edgei
.section .rodata.str1.1
.LC5:
.string "r"
.LC6:
.string "%d"
.LC7:
.string "%d %d"
.LC8:
.string "Read input\n"
.LC9:
.string "Quick Hull completed\n"
.text
.globl main
.type main, @function
main:
.LFB2072:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $32, %rsp
.cfi_def_cfa_offset 80
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq 8(%rsi), %rdi
leaq .LC5(%rip), %rsi
call fopen@PLT
movq %rax, %r12
leaq 12(%rsp), %rdx
leaq .LC6(%rip), %rsi
movq %rax, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
movl 12(%rsp), %r13d
movslq %r13d, %rbx
leaq 0(,%rbx,8), %rdi
call malloc@PLT
movq %rax, %rbp
movq %rbx, %rdi
salq $4, %rdi
call malloc@PLT
movq %rax, %r14
testl %r13d, %r13d
jle .L88
movl $0, %ebx
leaq .LC7(%rip), %r13
.L89:
leaq 16(%rsp), %rdx
leaq 20(%rsp), %rcx
movq %r13, %rsi
movq %r12, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
movq 16(%rsp), %rax
movq %rax, 0(%rbp,%rbx,8)
addq $1, %rbx
cmpl %ebx, 12(%rsp)
jg .L89
.L88:
leaq .LC8(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl 12(%rsp), %edx
movq %r14, %rsi
movq %rbp, %rdi
call _Z17doSerialQuickHullP5PointP4Edgei
leaq .LC9(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L93
movl $0, %eax
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L93:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2072:
.size main, .-main
.globl _Z37__device_stub__Z9quickhullP5PointS0_iP5PointS0_i
.type _Z37__device_stub__Z9quickhullP5PointS0_iP5PointS0_i, @function
_Z37__device_stub__Z9quickhullP5PointS0_iP5PointS0_i:
.LFB2097:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L98
.L94:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L99
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L98:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9quickhullP5PointS0_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L94
.L99:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2097:
.size _Z37__device_stub__Z9quickhullP5PointS0_iP5PointS0_i, .-_Z37__device_stub__Z9quickhullP5PointS0_iP5PointS0_i
.globl _Z9quickhullP5PointS0_i
.type _Z9quickhullP5PointS0_i, @function
_Z9quickhullP5PointS0_i:
.LFB2098:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z9quickhullP5PointS0_iP5PointS0_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2098:
.size _Z9quickhullP5PointS0_i, .-_Z9quickhullP5PointS0_i
.section .rodata.str1.1
.LC10:
.string "%s\n"
.text
.globl _Z15launchQuickHullP5PointS0_i
.type _Z15launchQuickHullP5PointS0_i, @function
_Z15launchQuickHullP5PointS0_i:
.LFB2059:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $32, %rsp
.cfi_def_cfa_offset 64
movq %rdi, %rbp
movq %rsi, %r12
movl %edx, %ebx
movl $1, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movslq %edx, %r8
movl $0, %r9d
salq $4, %r8
movq 8(%rsp), %rdx
movl $1, %ecx
movq 20(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L106
.L103:
call cudaDeviceSynchronize@PLT
testl %eax, %eax
jne .L107
.L102:
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L106:
.cfi_restore_state
movl %ebx, %edx
movq %r12, %rsi
movq %rbp, %rdi
call _Z37__device_stub__Z9quickhullP5PointS0_iP5PointS0_i
jmp .L103
.L107:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC10(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L102
.cfi_endproc
.LFE2059:
.size _Z15launchQuickHullP5PointS0_i, .-_Z15launchQuickHullP5PointS0_i
.section .rodata.str1.1
.LC11:
.string "_Z9quickhullP5PointS0_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2100:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z9quickhullP5PointS0_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2100:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
/**
* Quickhull.cu
* Author: Michael Gruesen
* A quickhull implementation for NVIDIA GPGPU Compute Capability 2.0
*
* Serial QSort Code Adapted
* from : Aashish Barnwal
* source: http://www.geeksforgeeks.org/iterative-quick-sort/
*
* Parallel QSort Code Adapted
* from : Seiral QSort Code
**/
// 2-D point with integer coordinates; equality compares both components.
struct Point {
  int x, y;
  bool operator==(Point a){ return x == a.x && y == a.y;}
};
// Directed hull edge from p to q; equality compares both endpoints.
struct Edge {
  Point p;
  Point q;
  bool operator==(Edge a){return p == a.p && q == a.q;}
};
// Device kernel for the parallel quickhull — currently an unimplemented stub;
// launchQuickHull invokes it but it performs no work.
__global__ void quickhull(Point* d_input, Point* d_output, int n){
}
/**
 * Launch the (currently stub) quickhull kernel and report any CUDA error
 * on stderr. Grid and block are default-constructed, i.e. 1x1x1.
 */
__host__ void launchQuickHull(Point* d_input, Point* d_output, int n){
  dim3 dimGrid;
  dim3 dimBlock;
  size_t sharedSize = n * sizeof(Edge);
  cudaError_t cErr;
  // BUG FIX: the execution configuration is <<<grid, block, smem>>>; the
  // original passed <<<dimBlock, dimGrid, ...>>>. Harmless while both are
  // (1,1,1), but wrong once real dimensions are chosen.
  quickhull<<< dimGrid, dimBlock, sharedSize >>>(d_input, d_output, n);
  cErr = cudaGetLastError();  // also catch launch-configuration errors
  if (cErr == cudaSuccess) cErr = cudaDeviceSynchronize();
  if (cErr != cudaSuccess) fprintf(stderr, "%s\n", cudaGetErrorString(cErr));
}
/**
 * Allocate device buffers for n points / n edges and copy the host points
 * to the card.
 * NOTE(review): d_input and d_output are passed BY VALUE, so the device
 * pointers written by cudaMalloc never reach the caller; the signature would
 * need Point** / Edge** to be usable. Left unchanged because the (currently
 * commented-out) caller relies on this signature.
 */
void allocate(Point* d_input, Point* h_input, Edge* d_output, int n){
  size_t memSize = n*sizeof(Point);
  size_t outSize = n*sizeof(Edge);  // BUG FIX: was n*sizeof(Point) for an Edge buffer
  cudaMalloc((void**)&d_input, memSize);
  cudaMalloc((void**)&d_output, outSize);
  cudaMemcpy(d_input, h_input, memSize, cudaMemcpyHostToDevice);
}
// Release the device-side input/output buffers.
void deallocate(Point* d_input, Point*d_output){
  cudaFree(d_input);
  cudaFree(d_output);
}
// Dump the hull edges to stderr as "px,py->qx,qy", comma separated,
// with a newline after the final edge.
void printHull(Edge* out, int n){
  for (int i = 0; i < n; i++){
    Edge e = out[i];
    const char* sep = (i + 1 == n) ? "\n" : ", ";
    fprintf(stderr, "%d,%d->%d,%d%s", e.p.x, e.p.y, e.q.x, e.q.y, sep);
  }
}
// Exchange the two points in place.
void swap(Point* a, Point* b){
  Point held = *b;
  *b = *a;
  *a = held;
}
/**
* Modification: Added extra conditional to allow
* sorting by x then y if a.x == b.x
**/
/**
 * Lomuto partition of input[l..r] around the pivot input[r], ordering
 * points by x and breaking ties by y (the "modification" noted above).
 * Returns the pivot's final index.
 */
int partition(Point* input, int l, int r){
  Point pivot = input[r];
  int boundary = l - 1;  // last index of the "before pivot" region
  for (int j = l; j < r; j++){
    bool before = (input[j].x < pivot.x) ||
                  (input[j].x == pivot.x && input[j].y < pivot.y);
    if (before){
      ++boundary;
      swap(&input[boundary], &input[j]);
    }
  }
  swap(&input[boundary + 1], &input[r]);
  return boundary + 1;
}
/**
* Code for iterative serial quicksort comes from
* http://www.geeksforgeeks.org/iterative-quick-sort/
* Author: Aashish Barnwal
**/
/**
 * Iterative quicksort of input[l..r] using an explicit index stack.
 * Adapted from Aashish Barnwal, geeksforgeeks.org/iterative-quick-sort/.
 */
void quickSortIterative(Point* input, int l, int r){
  // BUG FIX: guard empty/single ranges; the original pushed two entries into
  // a (r-l+1)-slot array, writing out of bounds when l == r.
  if (l >= r) return;
  // BUG FIX: the original used a C99 VLA (non-standard in C++ and a stack
  // overflow risk for large inputs); use a heap allocation instead.
  int* stack = (int*)malloc((size_t)(r - l + 1) * sizeof(int));
  int top = -1;
  stack[++top] = l;
  stack[++top] = r;
  while (top >= 0){
    r = stack[top--];
    l = stack[top--];
    int p = partition(input, l, r);
    if (p-1 > l){            // left sub-range still unsorted
      stack[++top] = l;
      stack[++top] = p-1;
    }
    if (p+1 < r){            // right sub-range still unsorted
      stack[++top] = p+1;
      stack[++top] = r;
    }
  }
  free(stack);
}
// Debug helper: print every point, one "x y" pair per line, to stderr.
void checkSort(Point* in, int n){
  for (int idx = 0; idx < n; ++idx)
    fprintf(stderr, "%d %d\n", in[idx].x, in[idx].y);
}
/**
 * Cross product (b-a) x (c-a): twice the signed area of triangle (a,b,c).
 * Positive when c lies to the left of the directed line a->b.
 */
int computeDistance(Point a, Point b, Point c){
  int abx = b.x - a.x;
  int aby = b.y - a.y;
  int acx = c.x - a.x;
  int acy = c.y - a.y;
  return abx * acy - aby * acx;
}
/**
 * Split hull edge `old` at point c: the stored edge matching `old` becomes
 * old.p -> c, and a new edge c -> old.q is placed immediately after it.
 * Edges after the split position shift one slot right to make room.
 * Returns the new edge count (hullCounter + 1).
 * NOTE(review): assumes `hull` has room for hullCounter+1 edges — confirm
 * against the caller's allocation.
 */
int insert(Edge* hull, Point c, Edge old, int hullCounter){
  //printHull(hull, hullCounter);
  //fprintf(stderr, "Inserting %d,%d\n", c.x, c.y);
  int insertIdx;
  // Locate the edge being split; if absent, insertIdx == hullCounter.
  for(insertIdx = 0; insertIdx < hullCounter; insertIdx++){
    if (hull[insertIdx] == old) break;
  }
  // Shift everything after the split position right by one slot
  // (iterates high-to-low so nothing is overwritten early).
  for(int i = hullCounter; i > insertIdx + 1; i--){
    hull[i] = hull[i - 1];
  }
  Edge e;
  e.q = old.q;  // second half: c -> old.q
  e.p = c;
  old.q = c;    // first half: old.p -> c
  hull[insertIdx] = old;
  hull[insertIdx + 1] = e;
  //printHull(hull, hullCounter+1);
  return ++hullCounter;
}
/**
 * Recursive quickhull step: insert the farthest point set[setMaxIdx] into
 * the hull between a and b, partition the remaining points against the two
 * new edges (a,c) and (c,b), and recurse on each side.
 * Returns the updated hull edge count.
 */
int serialFindHull(Point* set, Point a, Point b, Edge* hull, int setCounter, int setMaxIdx, int hullCounter){
  if (setCounter == 0){
    return hullCounter;
  }
  Point c = set[setMaxIdx];
  Edge old;
  old.p = a;
  old.q = b;
  hullCounter = insert(hull, c, old, hullCounter);
  // BUG FIX: buffers were sized (setCounter-2) elements. Up to setCounter-1
  // points can land on one side (only c itself is guaranteed excluded), and
  // setCounter==1 made the size negative. Size for setCounter to be safe.
  Point* s1 = (Point*)malloc(setCounter*sizeof(Point));
  int s1counter = 0;
  int s1MaxIdx = -1;
  int s1MaxVal = 0;
  Point* s2 = (Point*)malloc(setCounter*sizeof(Point));
  int s2counter = 0;
  int s2MaxIdx = -1;
  int s2MaxVal = 0;
  for (int i = 0; i < setCounter; i++){
    Point p = set[i];
    int res;
    if ((res = computeDistance(a, c, p)) > 0){       // outside edge a->c
      s1[s1counter++] = p;
      if (res > s1MaxVal){
        s1MaxIdx = s1counter - 1;
        s1MaxVal = res;
      }
    }
    else if ((res = computeDistance(c, b, p)) > 0){  // outside edge c->b
      s2[s2counter++] = p;
      if (res > s2MaxVal){
        s2MaxIdx = s2counter - 1;
        s2MaxVal = res;
      }
    }
  }
  hullCounter = serialFindHull(s1, a, c, hull, s1counter, s1MaxIdx, hullCounter);
  hullCounter = serialFindHull(s2, c, b, hull, s2counter, s2MaxIdx, hullCounter);
  free(s1);
  free(s2);
  return hullCounter;
}
/**
 * Build the convex hull of the x/y-sorted point set `in` (size n) into `out`.
 * in[0] and in[n-1] are the extreme points and are always hull vertices.
 * Returns the number of hull edges written.
 */
int serialHull(Point* in, Edge* out, int n){
  int hullCounter = 0;
  Edge a = {in[0], in[n-1]};
  a.p = in[0];
  a.q = in[n-1];
  out[hullCounter++] = a;
  Point* s1;
  Point* s2;
  // Interior points are indices 1..n-2, i.e. n-2 candidates per side.
  s1 = (Point*)malloc((n-2)*sizeof(Point));
  int s1counter = 0;
  int s1MaxIdx = 0;
  int s1MaxVal = 0;
  s2 = (Point*)malloc((n-2)*sizeof(Point));
  int s2counter = 0;
  int s2MaxIdx = 0;
  int s2MaxVal = 0;
  // BUG FIX: loop previously stopped at i < n-2 and silently skipped the
  // interior point in[n-2]; interior indices run 1..n-2 inclusive.
  for (int i = 1; i < n-1; i++){
    Point p = in[i];
    int res;
    if ((res = computeDistance(in[0], in[n-1], p)) > 0){        // left of in[0]->in[n-1]
      s1[s1counter++] = p;
      if (res > s1MaxVal) {
        s1MaxIdx = s1counter - 1;
        s1MaxVal = res;
      }
    }
    else if ((res = computeDistance(in[n-1], in[0], p)) > 0){   // other side
      s2[s2counter++] = p;
      if (res > s2MaxVal){
        s2MaxIdx = s2counter - 1;
        s2MaxVal = res;
      }
    }
  }
  hullCounter = serialFindHull(s1, in[0], in[n-1], out, s1counter, s1MaxIdx, hullCounter);
  // Close the hull with the return edge before recursing on the other side.
  a.p = in[n-1];
  a.q = in[0];
  out[hullCounter++] = a;
  hullCounter = serialFindHull(s2, in[n-1], in[0], out, s2counter, s2MaxIdx, hullCounter);
  free(s1);
  free(s2);
  return hullCounter;
}
/**
 * Sort the points, run the serial quickhull into `out`, and report the
 * elapsed wall time (measured with CUDA events) on stderr.
 */
void doSerialQuickHull(Point* in, Edge* out, int n){
  cudaEvent_t start, stop;
  float time;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start, 0);
  quickSortIterative(in, 0, n-1);
  int hulls = serialHull(in, out, n);
  (void)hulls;  // edge count currently unused (printHull call is commented out)
  cudaEventRecord(stop, 0);
  // BUG FIX: cudaEventElapsedTime requires the stop event to have completed;
  // without this synchronize the reported time may be invalid.
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&time, start, stop);
  fprintf(stderr, "serial quickhull runtime: %f ms\n", time);
  cudaEventDestroy(start);  // events were previously leaked
  cudaEventDestroy(stop);
}
/**
 * Entry point. Usage: <prog> <input-file>.
 * Input format: first line is the point count, then one "x y" pair per point.
 * Runs the serial quickhull reference implementation.
 */
int main(int argc, char** argv){
  // BUG FIX: argv[1] and fopen() were previously used unchecked.
  if (argc < 2){
    fprintf(stderr, "usage: %s <input-file>\n", argv[0]);
    return 1;
  }
  FILE* input = fopen(argv[1], "r");
  if (input == NULL){
    fprintf(stderr, "cannot open %s\n", argv[1]);
    return 1;
  }
  int numPoints = 0;
  if (fscanf(input, "%d", &numPoints) != 1 || numPoints <= 0){
    fprintf(stderr, "bad point count in %s\n", argv[1]);
    fclose(input);
    return 1;
  }
  size_t memSize = numPoints * sizeof(Point);
  size_t outSize = numPoints * sizeof(Edge);
  // Host input/output buffers.
  Point* h_input = (Point*)malloc(memSize);
  Edge* h_output = (Edge*)malloc(outSize);
  Edge* h_ref = (Edge*)malloc(outSize);
  for (int i = 0; i < numPoints; i++){
    Point p;
    fscanf(input, "%d %d", &p.x, &p.y);
    h_input[i] = p;
  }
  fclose(input);  // file handle was previously leaked
  fprintf(stderr, "Read input\n");
  doSerialQuickHull(h_input, h_ref, numPoints);
  fprintf(stderr, "Quick Hull completed\n");
  // GPU path (allocate/launchQuickHull/deallocate) intentionally disabled
  // in the original; see launchQuickHull for the device-side stub.
  free(h_input);  // heap buffers were previously leaked
  free(h_output);
  free(h_ref);
  return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
/**
* Quickhull.cu
* Author: Michael Gruesen
* A quickhull implementation for NVIDIA GPGPU Compute Capability 2.0
*
* Serial QSort Code Adapted
* from : Aashish Barnwal
* source: http://www.geeksforgeeks.org/iterative-quick-sort/
*
* Parallel QSort Code Adapted
* from : Seiral QSort Code
**/
// A 2D point with integer coordinates.
struct Point {
  int x, y;
  // Component-wise equality.
  bool operator==(Point a){ return x == a.x && y == a.y;}
};
// A directed hull edge from p to q.
struct Edge {
  Point p;
  Point q;
  // Equal only when both endpoints match in order (direction matters).
  bool operator==(Edge a){return p == a.p && q == a.q;}
};
// Device entry point for the parallel quickhull. Currently an empty stub:
// the GPU path in main() is commented out and only the serial CPU
// implementation is exercised.
__global__ void quickhull(Point* d_input, Point* d_output, int n){
}
/**
 * Launches the (stub) quickhull kernel on a single default-constructed
 * 1x1x1 grid/block with one Edge of dynamic shared memory per input point,
 * then synchronizes and reports any error.
 *
 * Fixes: the launch passed the block dim in the grid slot and vice versa
 * (harmless while both are (1,1,1), but wrong once configured), and launch-
 * configuration errors were never checked — they do not surface through
 * hipDeviceSynchronize's return value alone.
 */
__host__ void launchQuickHull(Point* d_input, Point* d_output, int n){
  dim3 dimGrid;
  dim3 dimBlock;
  size_t sharedSize = n * sizeof(Edge);
  hipError_t cErr;
  quickhull<<< dimGrid, dimBlock, sharedSize >>>(d_input, d_output, n);
  cErr = hipGetLastError();  // catches bad launch configuration
  if (cErr != hipSuccess) fprintf(stderr, "%s\n", hipGetErrorString(cErr));
  cErr = hipDeviceSynchronize();  // catches asynchronous execution errors
  if (cErr != hipSuccess) fprintf(stderr, "%s\n", hipGetErrorString(cErr));
}
/**
 * Allocates the device input/output buffers and uploads the host points.
 *
 * Fix: the output buffer was sized n*sizeof(Point) although it holds Edges
 * (two Points each), under-allocating it by half.
 *
 * NOTE(review): d_input and d_output are received BY VALUE, so the device
 * pointers written here never reach the caller; the signature is kept for
 * compatibility, but callers cannot use this as written (the GPU path in
 * main() is disabled, so this is currently dead code).
 */
void allocate(Point* d_input, Point* h_input, Edge* d_output, int n){
  size_t memSize = n * sizeof(Point);
  size_t outSize = n * sizeof(Edge);
  hipMalloc((void**)&d_input, memSize);
  hipMalloc((void**)&d_output, outSize);
  hipMemcpy(d_input, h_input, memSize, hipMemcpyHostToDevice);
}
// Releases the two device-side buffers created for a quickhull run, in the
// same order they were allocated.
void deallocate(Point* d_input, Point* d_output){
  Point* buffers[2] = { d_input, d_output };
  for (int i = 0; i < 2; i++)
    hipFree(buffers[i]);
}
// Dumps each hull edge to stderr as "px,py->qx,qy", comma-separated, with a
// newline after the final edge.
void printHull(Edge* out, int n){
  for (int i = 0; i < n; i++){
    Edge e = out[i];
    const char* sep = (i + 1 == n) ? "\n" : ", ";
    fprintf(stderr, "%d,%d->%d,%d%s", e.p.x, e.p.y, e.q.x, e.q.y, sep);
  }
}
// Exchanges the Points stored at a and b.
void swap(Point* a, Point* b){
  Point held = *b;
  *b = *a;
  *a = held;
}
/**
 * Lomuto partition of input[l..r] around the pivot input[r], ordering by x
 * and breaking ties by ascending y (the tie-break conditional was added on
 * top of the original geeksforgeeks scheme).
 *
 * Returns the pivot's final index.
 */
int partition(Point* input, int l, int r){
  Point pivot = input[r];
  int last = l - 1;  // index of the last element known to precede the pivot
  for (int scan = l; scan < r; scan++){
    bool precedes = (input[scan].x < pivot.x) ||
                    (input[scan].x == pivot.x && input[scan].y < pivot.y);
    if (precedes){
      last++;
      swap(&input[last], &input[scan]);
    }
  }
  swap(&input[last + 1], &input[r]);
  return last + 1;
}
/**
 * Iterative quicksort of input[l..r] using partition() (x-major, y-minor
 * order).
 *
 * Adapted from Aashish Barnwal's iterative quicksort:
 * http://www.geeksforgeeks.org/iterative-quick-sort/
 *
 * Fixes: the explicit index stack was a variable-length array — a compiler
 * extension in C++ that also overflows the call stack for large inputs and
 * is undefined for r <= l (two pushes into a <=1-element array). The stack
 * now lives on the heap, and trivial ranges return immediately. r - l + 1
 * slots suffice because each partition pushes at most one l/r pair per
 * strictly shrinking sub-range.
 */
void quickSortIterative(Point* input, int l, int r){
  if (l >= r) return;  // 0- or 1-element range: already sorted
  int* stack = (int*)malloc((size_t)(r - l + 1) * sizeof(int));
  if (stack == NULL) return;  // allocation failed; leave input unsorted
  int top = -1;
  stack[++top] = l;
  stack[++top] = r;
  while (top >= 0){
    r = stack[top--];
    l = stack[top--];
    int p = partition(input, l, r);
    if (p - 1 > l){  // left side still has 2+ elements
      stack[++top] = l;
      stack[++top] = p - 1;
    }
    if (p + 1 < r){  // right side still has 2+ elements
      stack[++top] = p + 1;
      stack[++top] = r;
    }
  }
  free(stack);
}
// Debug helper: prints every point as "x y", one per line, to stderr so the
// sort order can be inspected by eye.
void checkSort(Point* in, int n){
  for (int i = 0; i < n; i++){
    Point p = in[i];
    fprintf(stderr, "%d %d\n", p.x, p.y);
  }
}
// Signed 2D cross product (b - a) x (c - a): positive when c lies to the
// left of the directed line a->b, negative to the right, zero when
// collinear. Magnitude is proportional to c's distance from the line, which
// is all quickhull needs for farthest-point selection.
int computeDistance(Point a, Point b, Point c){
  int abx = b.x - a.x;
  int aby = b.y - a.y;
  int acx = c.x - a.x;
  int acy = c.y - a.y;
  return abx * acy - aby * acx;
}
/**
 * Splits the edge `old` in hull[0..hullCounter-1] at point c, replacing it
 * with (old.p -> c) and (c -> old.q) and shifting later edges right.
 *
 * Preconditions (not checked): `old` is present in the hull, and the array
 * has capacity for at least hullCounter + 1 edges — the shift writes
 * hull[hullCounter].
 *
 * Returns the new edge count (hullCounter + 1).
 */
int insert(Edge* hull, Point c, Edge old, int hullCounter){
  // Locate the edge being split.
  int splitAt = 0;
  while (splitAt < hullCounter && !(hull[splitAt] == old))
    splitAt++;
  // Open a gap one past the split point.
  for (int dst = hullCounter; dst > splitAt + 1; dst--)
    hull[dst] = hull[dst - 1];
  Edge second;
  second.p = c;
  second.q = old.q;  // second half: c -> old.q
  old.q = c;         // first half:  old.p -> c
  hull[splitAt] = old;
  hull[splitAt + 1] = second;
  return hullCounter + 1;
}
/**
 * Recursive quickhull step. `set` holds the candidate points outside edge
 * a->b, with set[setMaxIdx] the farthest point c. Splits the edge at c in
 * `hull`, partitions `set` into points strictly left of a->c (s1) and
 * strictly left of c->b (s2), and recurses on each side.
 *
 * Returns the updated hull edge count.
 *
 * Fix: the scratch buffers were sized (setCounter - 2) elements, but up to
 * setCounter - 1 points can land on a single side (only c itself is
 * guaranteed to drop out, being collinear with both sub-edges), so the
 * writes could overflow the heap; for setCounter < 2 the expression also
 * went negative and wrapped to a huge size_t. Both buffers are now sized
 * setCounter elements.
 */
int serialFindHull(Point* set, Point a, Point b, Edge* hull, int setCounter, int setMaxIdx, int hullCounter){
  if (setCounter == 0){
    return hullCounter;
  }
  Point c = set[setMaxIdx];
  Edge old;
  old.p = a;
  old.q = b;
  hullCounter = insert(hull, c, old, hullCounter);
  Point* s1 = (Point*)malloc(setCounter * sizeof(Point));
  Point* s2 = (Point*)malloc(setCounter * sizeof(Point));
  if (s1 == NULL || s2 == NULL){
    free(s1);
    free(s2);
    return hullCounter;  // out of memory: keep the hull built so far
  }
  int s1counter = 0, s1MaxIdx = -1, s1MaxVal = 0;
  int s2counter = 0, s2MaxIdx = -1, s2MaxVal = 0;
  for (int i = 0; i < setCounter; i++){
    Point p = set[i];
    int res;
    if ((res = computeDistance(a, c, p)) > 0){
      // p is strictly left of a->c: candidate for the first recursion
      s1[s1counter++] = p;
      if (res > s1MaxVal){
        s1MaxIdx = s1counter - 1;
        s1MaxVal = res;
      }
    }
    else if ((res = computeDistance(c, b, p)) > 0){
      // p is strictly left of c->b: candidate for the second recursion
      s2[s2counter++] = p;
      if (res > s2MaxVal){
        s2MaxIdx = s2counter - 1;
        s2MaxVal = res;
      }
    }
  }
  hullCounter = serialFindHull(s1, a, c, hull, s1counter, s1MaxIdx, hullCounter);
  hullCounter = serialFindHull(s2, c, b, hull, s2counter, s2MaxIdx, hullCounter);
  free(s1);
  free(s2);
  return hullCounter;
}
/**
 * Serial quickhull driver. `in` must be sorted by x (ties by y) so that
 * in[0] and in[n-1] are extreme points; the line between them splits the
 * hull into an upper and a lower chain. Hull edges are written into `out`
 * (caller-provided, capacity n edges); returns the number of edges.
 *
 * Fixes:
 *  - the interior-point loop ran `i < n-2` and never considered in[n-2];
 *    the interior candidates are indices 1..n-2, so the bound is `i < n-1`.
 *  - the scratch buffers were sized (n-2) elements, which goes negative
 *    (wrapping to a huge size_t) for n < 2; they are now sized n elements.
 */
int serialHull(Point* in, Edge* out, int n){
  int hullCounter = 0;
  Edge a;
  a.p = in[0];
  a.q = in[n-1];
  out[hullCounter++] = a;  // seed edge for the first chain: in[0] -> in[n-1]
  Point* s1 = (Point*)malloc(n * sizeof(Point));
  Point* s2 = (Point*)malloc(n * sizeof(Point));
  if (s1 == NULL || s2 == NULL){
    free(s1);
    free(s2);
    return hullCounter;  // out of memory: return the degenerate single edge
  }
  int s1counter = 0, s1MaxIdx = 0, s1MaxVal = 0;
  int s2counter = 0, s2MaxIdx = 0, s2MaxVal = 0;
  for (int i = 1; i < n-1; i++){
    Point p = in[i];
    int res;
    if ((res = computeDistance(in[0], in[n-1], p)) > 0){
      // p lies on the first chain's side of the splitting line
      s1[s1counter++] = p;
      if (res > s1MaxVal){
        s1MaxIdx = s1counter - 1;
        s1MaxVal = res;
      }
    }
    else if ((res = computeDistance(in[n-1], in[0], p)) > 0){
      // p lies on the opposite side
      s2[s2counter++] = p;
      if (res > s2MaxVal){
        s2MaxIdx = s2counter - 1;
        s2MaxVal = res;
      }
    }
  }
  hullCounter = serialFindHull(s1, in[0], in[n-1], out, s1counter, s1MaxIdx, hullCounter);
  a.p = in[n-1];
  a.q = in[0];
  out[hullCounter++] = a;  // seed edge for the second chain: in[n-1] -> in[0]
  hullCounter = serialFindHull(s2, in[n-1], in[0], out, s2counter, s2MaxIdx, hullCounter);
  free(s1);
  free(s2);
  return hullCounter;
}
/**
 * Sorts `in` in place (x-major, y-minor), runs the serial quickhull into
 * `out`, and reports the elapsed wall time on stderr.
 *
 * Fixes: hipEventElapsedTime was called without first synchronizing on the
 * stop event, so the reported time could be an error/garbage value, and the
 * two events were never destroyed.
 */
void doSerialQuickHull(Point* in, Edge* out, int n){
  hipEvent_t start, stop;
  float time = 0.0f;  // initialized so a timing failure prints 0, not garbage
  hipEventCreate(&start);
  hipEventCreate(&stop);
  hipEventRecord(start, 0);
  quickSortIterative(in, 0, n-1);
  int hulls = serialHull(in, out, n);
  (void)hulls;  // kept for debugging via printHull(out, hulls)
  hipEventRecord(stop, 0);
  hipEventSynchronize(stop);  // stop must complete before reading the time
  hipEventElapsedTime(&time, start, stop);
  hipEventDestroy(start);
  hipEventDestroy(stop);
  fprintf(stderr, "serial quickhull runtime: %f ms\n", time);
}
/**
 * Reads a point count followed by that many "x y" pairs from the file named
 * in argv[1], then runs and times the serial quickhull reference.
 *
 * Fixes: missing argv/fopen/fscanf error checks, the input stream was never
 * closed, and the three heap buffers were leaked.
 */
int main(int argc, char** argv){
  if (argc < 2){
    fprintf(stderr, "usage: %s <input-file>\n", argv[0]);
    return 1;
  }
  FILE* input = fopen(argv[1], "r");
  if (input == NULL){
    fprintf(stderr, "could not open %s\n", argv[1]);
    return 1;
  }
  int numPoints;
  if (fscanf(input, "%d", &numPoints) != 1 || numPoints <= 0){
    fprintf(stderr, "bad point count\n");
    fclose(input);
    return 1;
  }
  size_t memSize = numPoints * sizeof(Point);
  size_t outSize = numPoints * sizeof(Edge);
  Point* h_input = (Point*)malloc(memSize);
  Edge* h_output = (Edge*)malloc(outSize);  // reserved for the disabled GPU path
  Edge* h_ref = (Edge*)malloc(outSize);
  if (h_input == NULL || h_output == NULL || h_ref == NULL){
    fprintf(stderr, "out of memory\n");
    fclose(input);
    free(h_input); free(h_output); free(h_ref);
    return 1;
  }
  for (int i = 0; i < numPoints; i++){
    Point p;
    if (fscanf(input, "%d %d", &p.x, &p.y) != 2){
      fprintf(stderr, "bad point at index %d\n", i);
      fclose(input);
      free(h_input); free(h_output); free(h_ref);
      return 1;
    }
    h_input[i] = p;
  }
  fclose(input);
  fprintf(stderr, "Read input\n");
  doSerialQuickHull(h_input, h_ref, numPoints);
  fprintf(stderr, "Quick Hull completed\n");
  // GPU path (allocate / launchQuickHull / copy back / deallocate) is
  // intentionally disabled; only the serial reference runs.
  free(h_input);
  free(h_output);
  free(h_ref);
  return 0;
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
/**
* Quickhull.cu
* Author: Michael Gruesen
* A quickhull implementation for NVIDIA GPGPU Compute Capability 2.0
*
* Serial QSort Code Adapted
* from : Aashish Barnwal
* source: http://www.geeksforgeeks.org/iterative-quick-sort/
*
* Parallel QSort Code Adapted
* from : Seiral QSort Code
**/
struct Point {
int x, y;
bool operator==(Point a){ return x == a.x && y == a.y;}
};
struct Edge {
Point p;
Point q;
bool operator==(Edge a){return p == a.p && q == a.q;}
};
__global__ void quickhull(Point* d_input, Point* d_output, int n){
}
__host__ void launchQuickHull(Point* d_input, Point* d_output, int n){
dim3 dimGrid;
dim3 dimBlock;
size_t sharedSize = n * sizeof(Edge);
hipError_t cErr;
quickhull<<< dimBlock, dimGrid, sharedSize >>>(d_input, d_output, n);
cErr = hipDeviceSynchronize();
if (cErr != hipSuccess) fprintf(stderr, "%s\n", hipGetErrorString(cErr));
}
void allocate(Point* d_input, Point* h_input, Edge* d_output, int n){
size_t memSize = n*sizeof(Point);
hipMalloc((void**)&d_input, memSize);
hipMalloc((void**)&d_output, memSize);
hipMemcpy(d_input, h_input, memSize, hipMemcpyHostToDevice);
}
void deallocate(Point* d_input, Point*d_output){
hipFree(d_input);
hipFree(d_output);
}
void printHull(Edge* out, int n){
for (int i = 0; i < n; i++)
fprintf(stderr, "%d,%d->%d,%d%s", out[i].p.x, out[i].p.y, out[i].q.x, out[i].q.y, ((i + 1 == n) ? "\n" : ", "));
}
void swap(Point* a, Point* b){
Point temp = *a;
*a = *b;
*b = temp;
}
/**
* Modification: Added extra conditional to allow
* sorting by x then y if a.x == b.x
**/
int partition(Point* input, int l, int r){
int x = input[r].x;
int y = input[r].y;
int i = (l - 1);
for (int j = l; j <= r-1; j++){
//was : if(input[j].x <= x)
if(input[j].x < x){
i++;
swap(&input[i], &input[j]);
}
else if (input[j].x == x){
if (input[j].y < y){
i++;
swap(&input[i], &input[j]);
}
}
}
swap(&input[i+1], &input[r]);
return i+1;
}
/**
* Code for iterative serial quicksort comes from
* http://www.geeksforgeeks.org/iterative-quick-sort/
* Author: Aashish Barnwal
**/
void quickSortIterative(Point* input, int l, int r){
int stack[r - l + 1];
int top = -1;
stack[++top] = l;
stack[++top] = r;
while (top >=0){
r = stack[top--];
l = stack[top--];
int p = partition(input, l, r);
if (p-1 > l){
stack[++top] = l;
stack[++top] = p-1;
}
if (p+1 < r){
stack[++top] = p+1;
stack[++top] = r;
}
}
}
void checkSort(Point* in, int n){
for(int i = 0; i < n; i++){
fprintf(stderr, "%d %d\n", in[i].x, in[i].y);
}
}
int computeDistance(Point a, Point b, Point c){
return (b.x - a.x)*(c.y-a.y)-(b.y-a.y)*(c.x-a.x);
}
int insert(Edge* hull, Point c, Edge old, int hullCounter){
//printHull(hull, hullCounter);
//fprintf(stderr, "Inserting %d,%d\n", c.x, c.y);
int insertIdx;
for(insertIdx = 0; insertIdx < hullCounter; insertIdx++){
if (hull[insertIdx] == old) break;
}
for(int i = hullCounter; i > insertIdx + 1; i--){
hull[i] = hull[i - 1];
}
Edge e;
e.q = old.q;
e.p = c;
old.q = c;
hull[insertIdx] = old;
hull[insertIdx + 1] = e;
//printHull(hull, hullCounter+1);
return ++hullCounter;
}
int serialFindHull(Point* set, Point a, Point b, Edge* hull, int setCounter, int setMaxIdx, int hullCounter){
if (setCounter == 0){
return hullCounter;
}
Point c = set[setMaxIdx];
Edge old;
old.p = a;
old.q = b;
hullCounter = insert(hull, c, old, hullCounter);
Point* s1;
Point* s2;
s1 = (Point*)malloc((setCounter-2)*sizeof(Point));
int s1counter = 0;
int s1MaxIdx = -1;
int s1MaxVal = 0;
s2 = (Point*)malloc((setCounter-2)*sizeof(Point));
int s2counter = 0;
int s2MaxIdx = -1;
int s2MaxVal = 0;
for (int i = 0; i < setCounter; i++){
Point p = set[i];
int res;
if ((res = computeDistance(a, c, p)) > 0){
s1[s1counter++] = p;
if (res > s1MaxVal){
s1MaxIdx = s1counter - 1;
s1MaxVal = res;
}
}
else if ((res = computeDistance(c, b, p)) > 0){
s2[s2counter++] = p;
if (res > s2MaxVal){
s2MaxIdx = s2counter - 1;
s2MaxVal = res;
}
}
}
hullCounter = serialFindHull(s1, a, c, hull, s1counter, s1MaxIdx, hullCounter);
hullCounter = serialFindHull(s2, c, b, hull, s2counter, s2MaxIdx, hullCounter);
free(s1);
free(s2);
return hullCounter;
}
int serialHull(Point* in, Edge* out, int n){
//memset(out, NULL, n);
int hullCounter = 0;
Edge a = {in[0], in[n-1]};
a.p = in[0];
a.q = in[n-1];
out[hullCounter++] = a;
Point* s1;
Point* s2;
s1 = (Point*)malloc((n-2)*sizeof(Point));
int s1counter = 0;
int s1MaxIdx = 0;
int s1MaxVal = 0;
s2 = (Point*)malloc((n-2)*sizeof(Point));
int s2counter = 0;
int s2MaxIdx = 0;
int s2MaxVal = 0;
for (int i = 1; i < n-2; i++){
Point p = in[i];
int res;
if ((res = computeDistance(in[0], in[n-1], p)) > 0){
s1[s1counter++] = p;
if (res > s1MaxVal) {
s1MaxIdx = s1counter - 1;
s1MaxVal = res;
}
}
else if ((res = computeDistance(in[n-1], in[0], p)) > 0){
s2[s2counter++] = p;
if (res > s2MaxVal){
s2MaxIdx = s2counter - 1;
s2MaxVal = res;
}
}
}
hullCounter = serialFindHull(s1, in[0], in[n-1], out, s1counter, s1MaxIdx, hullCounter);
a.p = in[n-1];
a.q = in[0];
out[hullCounter++] = a;
hullCounter = serialFindHull(s2, in[n-1], in[0], out, s2counter, s2MaxIdx, hullCounter);
free(s1);
free(s2);
return hullCounter;
}
void doSerialQuickHull(Point* in, Edge* out, int n){
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
//fprintf(stderr, "starting serial quick sort\n");
hipEventRecord(start, 0);
quickSortIterative(in, 0, n-1);
//fprintf(stderr, "finished serial quick sort\n");
//checkSort(in, n);
//fprintf(stderr, "starting serial quick hull\n");
int hulls = serialHull(in, out, n);
hipEventRecord(stop, 0);
//fprintf(stderr, "finsihed serial quick hull\n");
//printHull(out, hulls);
hipEventElapsedTime(&time, start, stop);
fprintf(stderr, "serial quickhull runtime: %f ms\n", time);
}
int main(int argc, char** argv){
//get input passed as arg
FILE* input = fopen(argv[1], "r");
//get number of points
int numPoints;
fscanf(input, "%d", &numPoints);
size_t memSize = numPoints * sizeof(Point);
size_t outSize = numPoints * sizeof(Edge);
//host input/output
Point* h_input = (Point*)malloc(memSize);
Edge* h_output = (Edge*)malloc(outSize);
Edge* h_ref = (Edge*)malloc(outSize);
//initialize input
for (int i = 0; i < numPoints; i++){
Point p;
fscanf(input, "%d %d", &p.x, &p.y);
h_input[i] = p;
}
fprintf(stderr, "Read input\n");
doSerialQuickHull(h_input, h_ref, numPoints);
fprintf(stderr, "Quick Hull completed\n");
//device ptrs
//Point* d_input;
//Edge* d_output;
//allocate and copy to card
//allocate(d_input, h_input, d_output, numPoints);
//launch
//launchQuickHull(d_input, d_output, numPoints);
//copy back
//cudaMemcpy(h_output, d_output, numPoints*sizeof(Edge), cudaMemcpyDeviceToHost);
//deallocate card
//deallocate(d_input, d_output);
//print results
/*
for (int i = 0; i < numPoints; i++){
Edge e = h_output[i];
fprintf(stderr, "%d %d\n", e.x, e.y);
}
*/
return 0;
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9quickhullP5PointS0_i
.globl _Z9quickhullP5PointS0_i
.p2align 8
.type _Z9quickhullP5PointS0_i,@function
_Z9quickhullP5PointS0_i:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9quickhullP5PointS0_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9quickhullP5PointS0_i, .Lfunc_end0-_Z9quickhullP5PointS0_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9quickhullP5PointS0_i
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z9quickhullP5PointS0_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
/**
* Quickhull.cu
* Author: Michael Gruesen
* A quickhull implementation for NVIDIA GPGPU Compute Capability 2.0
*
* Serial QSort Code Adapted
* from : Aashish Barnwal
* source: http://www.geeksforgeeks.org/iterative-quick-sort/
*
* Parallel QSort Code Adapted
* from : Seiral QSort Code
**/
struct Point {
int x, y;
bool operator==(Point a){ return x == a.x && y == a.y;}
};
struct Edge {
Point p;
Point q;
bool operator==(Edge a){return p == a.p && q == a.q;}
};
__global__ void quickhull(Point* d_input, Point* d_output, int n){
}
__host__ void launchQuickHull(Point* d_input, Point* d_output, int n){
dim3 dimGrid;
dim3 dimBlock;
size_t sharedSize = n * sizeof(Edge);
hipError_t cErr;
quickhull<<< dimBlock, dimGrid, sharedSize >>>(d_input, d_output, n);
cErr = hipDeviceSynchronize();
if (cErr != hipSuccess) fprintf(stderr, "%s\n", hipGetErrorString(cErr));
}
void allocate(Point* d_input, Point* h_input, Edge* d_output, int n){
size_t memSize = n*sizeof(Point);
hipMalloc((void**)&d_input, memSize);
hipMalloc((void**)&d_output, memSize);
hipMemcpy(d_input, h_input, memSize, hipMemcpyHostToDevice);
}
void deallocate(Point* d_input, Point*d_output){
hipFree(d_input);
hipFree(d_output);
}
void printHull(Edge* out, int n){
for (int i = 0; i < n; i++)
fprintf(stderr, "%d,%d->%d,%d%s", out[i].p.x, out[i].p.y, out[i].q.x, out[i].q.y, ((i + 1 == n) ? "\n" : ", "));
}
void swap(Point* a, Point* b){
Point temp = *a;
*a = *b;
*b = temp;
}
/**
* Modification: Added extra conditional to allow
* sorting by x then y if a.x == b.x
**/
int partition(Point* input, int l, int r){
int x = input[r].x;
int y = input[r].y;
int i = (l - 1);
for (int j = l; j <= r-1; j++){
//was : if(input[j].x <= x)
if(input[j].x < x){
i++;
swap(&input[i], &input[j]);
}
else if (input[j].x == x){
if (input[j].y < y){
i++;
swap(&input[i], &input[j]);
}
}
}
swap(&input[i+1], &input[r]);
return i+1;
}
/**
* Code for iterative serial quicksort comes from
* http://www.geeksforgeeks.org/iterative-quick-sort/
* Author: Aashish Barnwal
**/
void quickSortIterative(Point* input, int l, int r){
int stack[r - l + 1];
int top = -1;
stack[++top] = l;
stack[++top] = r;
while (top >=0){
r = stack[top--];
l = stack[top--];
int p = partition(input, l, r);
if (p-1 > l){
stack[++top] = l;
stack[++top] = p-1;
}
if (p+1 < r){
stack[++top] = p+1;
stack[++top] = r;
}
}
}
void checkSort(Point* in, int n){
for(int i = 0; i < n; i++){
fprintf(stderr, "%d %d\n", in[i].x, in[i].y);
}
}
int computeDistance(Point a, Point b, Point c){
return (b.x - a.x)*(c.y-a.y)-(b.y-a.y)*(c.x-a.x);
}
int insert(Edge* hull, Point c, Edge old, int hullCounter){
//printHull(hull, hullCounter);
//fprintf(stderr, "Inserting %d,%d\n", c.x, c.y);
int insertIdx;
for(insertIdx = 0; insertIdx < hullCounter; insertIdx++){
if (hull[insertIdx] == old) break;
}
for(int i = hullCounter; i > insertIdx + 1; i--){
hull[i] = hull[i - 1];
}
Edge e;
e.q = old.q;
e.p = c;
old.q = c;
hull[insertIdx] = old;
hull[insertIdx + 1] = e;
//printHull(hull, hullCounter+1);
return ++hullCounter;
}
int serialFindHull(Point* set, Point a, Point b, Edge* hull, int setCounter, int setMaxIdx, int hullCounter){
if (setCounter == 0){
return hullCounter;
}
Point c = set[setMaxIdx];
Edge old;
old.p = a;
old.q = b;
hullCounter = insert(hull, c, old, hullCounter);
Point* s1;
Point* s2;
s1 = (Point*)malloc((setCounter-2)*sizeof(Point));
int s1counter = 0;
int s1MaxIdx = -1;
int s1MaxVal = 0;
s2 = (Point*)malloc((setCounter-2)*sizeof(Point));
int s2counter = 0;
int s2MaxIdx = -1;
int s2MaxVal = 0;
for (int i = 0; i < setCounter; i++){
Point p = set[i];
int res;
if ((res = computeDistance(a, c, p)) > 0){
s1[s1counter++] = p;
if (res > s1MaxVal){
s1MaxIdx = s1counter - 1;
s1MaxVal = res;
}
}
else if ((res = computeDistance(c, b, p)) > 0){
s2[s2counter++] = p;
if (res > s2MaxVal){
s2MaxIdx = s2counter - 1;
s2MaxVal = res;
}
}
}
hullCounter = serialFindHull(s1, a, c, hull, s1counter, s1MaxIdx, hullCounter);
hullCounter = serialFindHull(s2, c, b, hull, s2counter, s2MaxIdx, hullCounter);
free(s1);
free(s2);
return hullCounter;
}
int serialHull(Point* in, Edge* out, int n){
//memset(out, NULL, n);
int hullCounter = 0;
Edge a = {in[0], in[n-1]};
a.p = in[0];
a.q = in[n-1];
out[hullCounter++] = a;
Point* s1;
Point* s2;
s1 = (Point*)malloc((n-2)*sizeof(Point));
int s1counter = 0;
int s1MaxIdx = 0;
int s1MaxVal = 0;
s2 = (Point*)malloc((n-2)*sizeof(Point));
int s2counter = 0;
int s2MaxIdx = 0;
int s2MaxVal = 0;
for (int i = 1; i < n-2; i++){
Point p = in[i];
int res;
if ((res = computeDistance(in[0], in[n-1], p)) > 0){
s1[s1counter++] = p;
if (res > s1MaxVal) {
s1MaxIdx = s1counter - 1;
s1MaxVal = res;
}
}
else if ((res = computeDistance(in[n-1], in[0], p)) > 0){
s2[s2counter++] = p;
if (res > s2MaxVal){
s2MaxIdx = s2counter - 1;
s2MaxVal = res;
}
}
}
hullCounter = serialFindHull(s1, in[0], in[n-1], out, s1counter, s1MaxIdx, hullCounter);
a.p = in[n-1];
a.q = in[0];
out[hullCounter++] = a;
hullCounter = serialFindHull(s2, in[n-1], in[0], out, s2counter, s2MaxIdx, hullCounter);
free(s1);
free(s2);
return hullCounter;
}
void doSerialQuickHull(Point* in, Edge* out, int n){
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
//fprintf(stderr, "starting serial quick sort\n");
hipEventRecord(start, 0);
quickSortIterative(in, 0, n-1);
//fprintf(stderr, "finished serial quick sort\n");
//checkSort(in, n);
//fprintf(stderr, "starting serial quick hull\n");
int hulls = serialHull(in, out, n);
hipEventRecord(stop, 0);
//fprintf(stderr, "finsihed serial quick hull\n");
//printHull(out, hulls);
hipEventElapsedTime(&time, start, stop);
fprintf(stderr, "serial quickhull runtime: %f ms\n", time);
}
int main(int argc, char** argv){
//get input passed as arg
FILE* input = fopen(argv[1], "r");
//get number of points
int numPoints;
fscanf(input, "%d", &numPoints);
size_t memSize = numPoints * sizeof(Point);
size_t outSize = numPoints * sizeof(Edge);
//host input/output
Point* h_input = (Point*)malloc(memSize);
Edge* h_output = (Edge*)malloc(outSize);
Edge* h_ref = (Edge*)malloc(outSize);
//initialize input
for (int i = 0; i < numPoints; i++){
Point p;
fscanf(input, "%d %d", &p.x, &p.y);
h_input[i] = p;
}
fprintf(stderr, "Read input\n");
doSerialQuickHull(h_input, h_ref, numPoints);
fprintf(stderr, "Quick Hull completed\n");
//device ptrs
//Point* d_input;
//Edge* d_output;
//allocate and copy to card
//allocate(d_input, h_input, d_output, numPoints);
//launch
//launchQuickHull(d_input, d_output, numPoints);
//copy back
//cudaMemcpy(h_output, d_output, numPoints*sizeof(Edge), cudaMemcpyDeviceToHost);
//deallocate card
//deallocate(d_input, d_output);
//print results
/*
for (int i = 0; i < numPoints; i++){
Edge e = h_output[i];
fprintf(stderr, "%d %d\n", e.x, e.y);
}
*/
return 0;
}
|
.text
.file "quickhull.hip"
.globl _Z24__device_stub__quickhullP5PointS0_i # -- Begin function _Z24__device_stub__quickhullP5PointS0_i
.p2align 4, 0x90
.type _Z24__device_stub__quickhullP5PointS0_i,@function
_Z24__device_stub__quickhullP5PointS0_i: # @_Z24__device_stub__quickhullP5PointS0_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9quickhullP5PointS0_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z24__device_stub__quickhullP5PointS0_i, .Lfunc_end0-_Z24__device_stub__quickhullP5PointS0_i
.cfi_endproc
# -- End function
.globl _Z15launchQuickHullP5PointS0_i # -- Begin function _Z15launchQuickHullP5PointS0_i
.p2align 4, 0x90
.type _Z15launchQuickHullP5PointS0_i,@function
_Z15launchQuickHullP5PointS0_i: # @_Z15launchQuickHullP5PointS0_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %ebx
movq %rsi, %r14
movq %rdi, %r15
movslq %edx, %r8
shlq $4, %r8
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq %r15, 72(%rsp)
movq %r14, 64(%rsp)
movl %ebx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9quickhullP5PointS0_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB1_3
.LBB1_4:
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB1_3:
.cfi_def_cfa_offset 144
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movq %rbx, %rdi
movq %rax, %rdx
xorl %eax, %eax
callq fprintf
jmp .LBB1_4
.Lfunc_end1:
.size _Z15launchQuickHullP5PointS0_i, .Lfunc_end1-_Z15launchQuickHullP5PointS0_i
.cfi_endproc
# -- End function
.globl _Z8allocateP5PointS0_P4Edgei # -- Begin function _Z8allocateP5PointS0_P4Edgei
.p2align 4, 0x90
.type _Z8allocateP5PointS0_P4Edgei,@function
_Z8allocateP5PointS0_P4Edgei: # @_Z8allocateP5PointS0_P4Edgei
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $24, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq %rsi, %rbx
movq %rdi, 8(%rsp)
movq %rdx, 16(%rsp)
movslq %ecx, %r14
shlq $3, %r14
leaq 8(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
leaq 16(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
movq 8(%rsp), %rdi
movq %rbx, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
addq $24, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z8allocateP5PointS0_P4Edgei, .Lfunc_end2-_Z8allocateP5PointS0_P4Edgei
.cfi_endproc
# -- End function
.globl _Z10deallocateP5PointS0_ # -- Begin function _Z10deallocateP5PointS0_
.p2align 4, 0x90
.type _Z10deallocateP5PointS0_,@function
_Z10deallocateP5PointS0_: # @_Z10deallocateP5PointS0_
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
movq %rsi, %rbx
callq hipFree
movq %rbx, %rdi
popq %rbx
.cfi_def_cfa_offset 8
jmp hipFree # TAILCALL
.Lfunc_end3:
.size _Z10deallocateP5PointS0_, .Lfunc_end3-_Z10deallocateP5PointS0_
.cfi_endproc
# -- End function
.globl _Z9printHullP4Edgei # -- Begin function _Z9printHullP4Edgei
.p2align 4, 0x90
.type _Z9printHullP4Edgei,@function
_Z9printHullP4Edgei: # @_Z9printHullP4Edgei
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB4_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $16, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
shlq $4, %r14
leaq -16(%r14), %r15
movl $.L.str.2, %r12d
xorl %r13d, %r13d
.LBB4_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movq stderr(%rip), %rdi
movl (%rbx,%r13), %edx
movl 4(%rbx,%r13), %ecx
movl 8(%rbx,%r13), %r8d
movl 12(%rbx,%r13), %r9d
cmpq %r13, %r15
movl $.L.str.3, %eax
cmoveq %r12, %rax
movq %rax, (%rsp)
movl $.L.str.1, %esi
xorl %eax, %eax
callq fprintf
addq $16, %r13
cmpq %r13, %r14
jne .LBB4_2
# %bb.3:
addq $16, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.LBB4_4: # %._crit_edge
retq
.Lfunc_end4:
.size _Z9printHullP4Edgei, .Lfunc_end4-_Z9printHullP4Edgei
.cfi_endproc
# -- End function
.globl _Z4swapP5PointS0_ # -- Begin function _Z4swapP5PointS0_
.p2align 4, 0x90
.type _Z4swapP5PointS0_,@function
_Z4swapP5PointS0_: # @_Z4swapP5PointS0_
.cfi_startproc
# %bb.0:
movq (%rdi), %rax
movq (%rsi), %rcx
movq %rcx, (%rdi)
movq %rax, (%rsi)
retq
.Lfunc_end5:
.size _Z4swapP5PointS0_, .Lfunc_end5-_Z4swapP5PointS0_
.cfi_endproc
# -- End function
.globl _Z9partitionP5Pointii # -- Begin function _Z9partitionP5Pointii
.p2align 4, 0x90
.type _Z9partitionP5Pointii,@function
_Z9partitionP5Pointii: # @_Z9partitionP5Pointii
.cfi_startproc
# %bb.0:
movslq %edx, %rcx
cmpl %ecx, %esi
jge .LBB6_8
# %bb.1: # %.lr.ph.preheader
movl (%rdi,%rcx,8), %eax
movl 4(%rdi,%rcx,8), %edx
movslq %esi, %r8
decl %esi
jmp .LBB6_2
.p2align 4, 0x90
.LBB6_5: # %.sink.split
# in Loop: Header=BB6_2 Depth=1
movslq %esi, %r9
incl %esi
movq 8(%rdi,%r9,8), %r10
movq (%rdi,%r8,8), %r11
movq %r11, 8(%rdi,%r9,8)
movq %r10, (%rdi,%r8,8)
.LBB6_6: # in Loop: Header=BB6_2 Depth=1
incq %r8
cmpq %r8, %rcx
je .LBB6_7
.LBB6_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
cmpl %eax, (%rdi,%r8,8)
jl .LBB6_5
# %bb.3: # in Loop: Header=BB6_2 Depth=1
jne .LBB6_6
# %bb.4: # in Loop: Header=BB6_2 Depth=1
cmpl %edx, 4(%rdi,%r8,8)
jl .LBB6_5
jmp .LBB6_6
.LBB6_7: # %._crit_edge.loopexit
incl %esi
.LBB6_8: # %._crit_edge
movslq %esi, %rax
movq (%rdi,%rax,8), %rdx
movq (%rdi,%rcx,8), %rsi
movq %rsi, (%rdi,%rax,8)
movq %rdx, (%rdi,%rcx,8)
# kill: def $eax killed $eax killed $rax
retq
.Lfunc_end6:
.size _Z9partitionP5Pointii, .Lfunc_end6-_Z9partitionP5Pointii
.cfi_endproc
# -- End function
.globl _Z18quickSortIterativeP5Pointii # -- Begin function _Z18quickSortIterativeP5Pointii
.p2align 4, 0x90
.type _Z18quickSortIterativeP5Pointii,@function
_Z18quickSortIterativeP5Pointii: # @_Z18quickSortIterativeP5Pointii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset %rbp, -16
movq %rsp, %rbp
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
pushq %rax
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
movl %edx, %eax
subl %esi, %eax
incl %eax
leaq 15(,%rax,4), %rcx
andq $-16, %rcx
movq %rsp, %r8
movq %r8, %rax
subq %rcx, %rax
negq %rcx
movq %rax, %rsp
movl %esi, (%r8,%rcx)
movl %edx, 4(%rax)
movl $1, %ecx
jmp .LBB7_1
.p2align 4, 0x90
.LBB7_14: # in Loop: Header=BB7_1 Depth=1
testl %ecx, %ecx
js .LBB7_15
.LBB7_1: # =>This Loop Header: Depth=1
# Child Loop BB7_3 Depth 2
movl %ecx, %r8d
movl -4(%rax,%r8,4), %r9d
movl (%rax,%r8,4), %edx
movslq %edx, %r10
movl %r9d, %esi
cmpl %r10d, %r9d
jge .LBB7_9
# %bb.2: # %.lr.ph.preheader.i
# in Loop: Header=BB7_1 Depth=1
movslq %r9d, %r11
movl (%rdi,%r10,8), %ebx
movl 4(%rdi,%r10,8), %r14d
leal -1(%r9), %esi
jmp .LBB7_3
.p2align 4, 0x90
.LBB7_6: # %.sink.split
# in Loop: Header=BB7_3 Depth=2
movslq %esi, %r15
incl %esi
movq 8(%rdi,%r15,8), %r12
movq (%rdi,%r11,8), %r13
movq %r13, 8(%rdi,%r15,8)
movq %r12, (%rdi,%r11,8)
.LBB7_7: # in Loop: Header=BB7_3 Depth=2
incq %r11
cmpq %r11, %r10
je .LBB7_8
.LBB7_3: # %.lr.ph.i
# Parent Loop BB7_1 Depth=1
# => This Inner Loop Header: Depth=2
cmpl %ebx, (%rdi,%r11,8)
jl .LBB7_6
# %bb.4: # in Loop: Header=BB7_3 Depth=2
jne .LBB7_7
# %bb.5: # in Loop: Header=BB7_3 Depth=2
cmpl %r14d, 4(%rdi,%r11,8)
jl .LBB7_6
jmp .LBB7_7
.p2align 4, 0x90
.LBB7_8: # %._crit_edge.loopexit.i
# in Loop: Header=BB7_1 Depth=1
incl %esi
.LBB7_9: # %_Z9partitionP5Pointii.exit
# in Loop: Header=BB7_1 Depth=1
movslq %esi, %r11
movq (%rdi,%r11,8), %rbx
movq (%rdi,%r10,8), %r14
movq %r14, (%rdi,%r11,8)
movq %rbx, (%rdi,%r10,8)
decl %r11d
cmpl %r9d, %r11d
jle .LBB7_10
# %bb.11: # in Loop: Header=BB7_1 Depth=1
movl %r11d, (%rax,%r8,4)
jmp .LBB7_12
.p2align 4, 0x90
.LBB7_10: # in Loop: Header=BB7_1 Depth=1
addl $-2, %ecx
.LBB7_12: # in Loop: Header=BB7_1 Depth=1
incl %esi
cmpl %edx, %esi
jge .LBB7_14
# %bb.13: # in Loop: Header=BB7_1 Depth=1
movslq %ecx, %r8
movl %esi, 4(%rax,%r8,4)
leal 2(%r8), %ecx
movl %edx, 8(%rax,%r8,4)
jmp .LBB7_14
.LBB7_15:
leaq -40(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
.cfi_def_cfa %rsp, 8
retq
.Lfunc_end7:
.size _Z18quickSortIterativeP5Pointii, .Lfunc_end7-_Z18quickSortIterativeP5Pointii
.cfi_endproc
# -- End function
.globl _Z9checkSortP5Pointi # -- Begin function _Z9checkSortP5Pointi
.p2align 4, 0x90
.type _Z9checkSortP5Pointi,@function
_Z9checkSortP5Pointi: # @_Z9checkSortP5Pointi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB8_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.LBB8_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movq stderr(%rip), %rdi
movl (%rbx,%r15,8), %edx
movl 4(%rbx,%r15,8), %ecx
movl $.L.str.4, %esi
xorl %eax, %eax
callq fprintf
incq %r15
cmpq %r15, %r14
jne .LBB8_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB8_4: # %._crit_edge
retq
.Lfunc_end8:
.size _Z9checkSortP5Pointi, .Lfunc_end8-_Z9checkSortP5Pointi
.cfi_endproc
# -- End function
.globl _Z15computeDistance5PointS_S_ # -- Begin function _Z15computeDistance5PointS_S_
.p2align 4, 0x90
.type _Z15computeDistance5PointS_S_,@function
_Z15computeDistance5PointS_S_: # @_Z15computeDistance5PointS_S_
.cfi_startproc
# %bb.0:
movq %rdi, %rax
movq %rdi, %rcx
shrq $32, %rcx
movq %rsi, %rdi
shrq $32, %rdi
subl %eax, %esi
subl %edx, %eax
shrq $32, %rdx
subl %ecx, %edx
imull %edx, %esi
subl %ecx, %edi
imull %edi, %eax
addl %esi, %eax
# kill: def $eax killed $eax killed $rax
retq
.Lfunc_end9:
.size _Z15computeDistance5PointS_S_, .Lfunc_end9-_Z15computeDistance5PointS_S_
.cfi_endproc
# -- End function
.globl _Z6insertP4Edge5PointS_i # -- Begin function _Z6insertP4Edge5PointS_i
.p2align 4, 0x90
.type _Z6insertP4Edge5PointS_i,@function
_Z6insertP4Edge5PointS_i: # @_Z6insertP4Edge5PointS_i
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
movl %r8d, %eax
testl %r8d, %r8d
jle .LBB10_1
# %bb.2: # %.lr.ph
movl %eax, %r9d
movq %rdx, %r10
shrq $32, %r10
movq %rcx, %r11
shrq $32, %r11
leaq 12(%rdi), %rbx
xorl %r8d, %r8d
jmp .LBB10_3
.p2align 4, 0x90
.LBB10_7: # %.critedge
# in Loop: Header=BB10_3 Depth=1
incq %r8
addq $16, %rbx
cmpq %r8, %r9
je .LBB10_8
.LBB10_3: # =>This Inner Loop Header: Depth=1
cmpl %edx, -12(%rbx)
jne .LBB10_7
# %bb.4: # in Loop: Header=BB10_3 Depth=1
cmpl %r10d, -8(%rbx)
jne .LBB10_7
# %bb.5: # %_ZN4EdgeeqES_.exit
# in Loop: Header=BB10_3 Depth=1
cmpl %ecx, -4(%rbx)
jne .LBB10_7
# %bb.6: # %_ZN4EdgeeqES_.exit
# in Loop: Header=BB10_3 Depth=1
cmpl %r11d, (%rbx)
jne .LBB10_7
jmp .LBB10_9
.LBB10_8: # %_ZN4EdgeeqES_.exit._crit_edge.loopexit
movl %r9d, %r8d
jmp .LBB10_9
.LBB10_1:
xorl %r8d, %r8d
.LBB10_9: # %_ZN4EdgeeqES_.exit._crit_edge
leal 1(%r8), %r9d
cmpl %eax, %r9d
jge .LBB10_12
# %bb.10: # %.lr.ph27.preheader
movslq %eax, %r10
movslq %r9d, %r11
movq %r10, %rbx
shlq $4, %rbx
addq %rdi, %rbx
.p2align 4, 0x90
.LBB10_11: # %.lr.ph27
# =>This Inner Loop Header: Depth=1
movups -16(%rbx), %xmm0
movups %xmm0, (%rbx)
addq $-16, %rbx
decq %r10
cmpq %r11, %r10
jg .LBB10_11
.LBB10_12: # %._crit_edge28
movl %r8d, %r8d
shlq $4, %r8
movq %rdx, (%rdi,%r8)
movq %rsi, 8(%rdi,%r8)
movl %r9d, %edx
shlq $4, %rdx
movq %rsi, (%rdi,%rdx)
movq %rcx, 8(%rdi,%rdx)
incl %eax
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end10:
.size _Z6insertP4Edge5PointS_i, .Lfunc_end10-_Z6insertP4Edge5PointS_i
.cfi_endproc
# -- End function
.globl _Z14serialFindHullP5PointS_S_P4Edgeiii # -- Begin function _Z14serialFindHullP5PointS_S_P4Edgeiii
.p2align 4, 0x90
.type _Z14serialFindHullP5PointS_S_P4Edgeiii,@function
_Z14serialFindHullP5PointS_S_P4Edgeiii: # @_Z14serialFindHullP5PointS_S_P4Edgeiii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $88, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdi, 40(%rsp) # 8-byte Spill
movl 144(%rsp), %eax
testl %r8d, %r8d
je .LBB11_15
# %bb.1:
movl %r8d, %ebp
movq %rdx, %rbx
movq %rsi, %r15
movslq %r9d, %r9
testl %eax, %eax
jle .LBB11_2
# %bb.3: # %.lr.ph.i
movq %r15, %rdx
shrq $32, %rdx
movq %rbx, %rsi
shrq $32, %rsi
movl %eax, %edi
leaq 12(%rcx), %r8
xorl %r10d, %r10d
jmp .LBB11_4
.p2align 4, 0x90
.LBB11_8: # %.critedge.i
# in Loop: Header=BB11_4 Depth=1
incq %r10
addq $16, %r8
cmpq %r10, %rdi
je .LBB11_9
.LBB11_4: # =>This Inner Loop Header: Depth=1
cmpl %r15d, -12(%r8)
jne .LBB11_8
# %bb.5: # in Loop: Header=BB11_4 Depth=1
cmpl %edx, -8(%r8)
jne .LBB11_8
# %bb.6: # %_ZN4EdgeeqES_.exit.i
# in Loop: Header=BB11_4 Depth=1
cmpl %ebx, -4(%r8)
jne .LBB11_8
# %bb.7: # %_ZN4EdgeeqES_.exit.i
# in Loop: Header=BB11_4 Depth=1
cmpl %esi, (%r8)
jne .LBB11_8
jmp .LBB11_10
.LBB11_15: # %common.ret
# kill: def $eax killed $eax killed $rax
jmp .LBB11_16
.LBB11_9: # %_ZN4EdgeeqES_.exit._crit_edge.i.loopexit
movl %eax, %r10d
jmp .LBB11_10
.LBB11_2:
xorl %r10d, %r10d
.LBB11_10: # %_ZN4EdgeeqES_.exit._crit_edge.i
movq 40(%rsp), %rdx # 8-byte Reload
movq (%rdx,%r9,8), %r12
leal 1(%r10), %r8d
cmpl %eax, %r8d
jge .LBB11_13
# %bb.11: # %.lr.ph27.preheader.i
movslq %eax, %rdx
movslq %r8d, %rsi
movq %rdx, %rdi
shlq $4, %rdi
addq %rcx, %rdi
.p2align 4, 0x90
.LBB11_12: # %.lr.ph27.i
# =>This Inner Loop Header: Depth=1
movups -16(%rdi), %xmm0
movups %xmm0, (%rdi)
addq $-16, %rdi
decq %rdx
cmpq %rsi, %rdx
jg .LBB11_12
.LBB11_13: # %_Z6insertP4Edge5PointS_i.exit
movl %r10d, %edx
shlq $4, %rdx
movq %r15, (%rcx,%rdx)
movq %r12, 8(%rcx,%rdx)
movl %r8d, %edx
shlq $4, %rdx
movq %r12, (%rcx,%rdx)
movq %rcx, 64(%rsp) # 8-byte Spill
movq %rbx, 8(%rcx,%rdx)
incl %eax
movq %rax, 48(%rsp) # 8-byte Spill
movslq %ebp, %rax
leaq -16(,%rax,8), %r14
movq %r14, %rdi
callq malloc
movq %rax, 32(%rsp) # 8-byte Spill
movq %r14, %rdi
callq malloc
movq %rax, 16(%rsp) # 8-byte Spill
testl %ebp, %ebp
movq %rbx, 56(%rsp) # 8-byte Spill
movq %r12, 24(%rsp) # 8-byte Spill
jle .LBB11_14
# %bb.17: # %.lr.ph
movq %r15, %rax
shrq $32, %rax
movq %r12, %rcx
shrq $32, %rcx
movl %r12d, %edx
subl %r15d, %edx
movl %ecx, %esi
subl %eax, %esi
movq %rbx, %rdi
shrq $32, %rdi
movl %ebx, %r8d
subl %r12d, %r8d
movl %r8d, 12(%rsp) # 4-byte Spill
movq %rcx, 80(%rsp) # 8-byte Spill
subl %ecx, %edi
movq %rdi, 72(%rsp) # 8-byte Spill
movl %ebp, %r11d
movl $-1, %r9d
xorl %ebp, %ebp
xorl %r8d, %r8d
xorl %r10d, %r10d
xorl %r12d, %r12d
movl $-1, 8(%rsp) # 4-byte Folded Spill
xorl %ecx, %ecx
jmp .LBB11_18
.p2align 4, 0x90
.LBB11_19: # in Loop: Header=BB11_18 Depth=1
movslq %r8d, %r14
incl %r8d
movq 32(%rsp), %r13 # 8-byte Reload
movq %rdi, (%r13,%r14,8)
cmpl %r10d, %ebx
cmovgl %ebx, %r10d
cmovgl %r14d, %r9d
.LBB11_22: # in Loop: Header=BB11_18 Depth=1
incq %rbp
cmpq %rbp, %r11
je .LBB11_23
.LBB11_18: # =>This Inner Loop Header: Depth=1
movq 40(%rsp), %rdi # 8-byte Reload
movq (%rdi,%rbp,8), %rdi
movq %rdi, %r14
shrq $32, %r14
movl %r14d, %r13d
subl %eax, %r13d
imull %edx, %r13d
movl %r15d, %ebx
subl %edi, %ebx
imull %esi, %ebx
addl %r13d, %ebx
testl %ebx, %ebx
jg .LBB11_19
# %bb.20: # in Loop: Header=BB11_18 Depth=1
subl 80(%rsp), %r14d # 4-byte Folded Reload
imull 12(%rsp), %r14d # 4-byte Folded Reload
movq 24(%rsp), %rbx # 8-byte Reload
# kill: def $ebx killed $ebx killed $rbx
subl %edi, %ebx
imull 72(%rsp), %ebx # 4-byte Folded Reload
addl %r14d, %ebx
testl %ebx, %ebx
jle .LBB11_22
# %bb.21: # in Loop: Header=BB11_18 Depth=1
movslq %r12d, %r14
incl %r12d
movq 16(%rsp), %r13 # 8-byte Reload
movq %rdi, (%r13,%r14,8)
cmpl %ecx, %ebx
cmovgl %ebx, %ecx
movl 8(%rsp), %edi # 4-byte Reload
cmovgl %r14d, %edi
movl %edi, 8(%rsp) # 4-byte Spill
jmp .LBB11_22
.LBB11_14:
xorl %r12d, %r12d
movl $-1, 8(%rsp) # 4-byte Folded Spill
movl $-1, %r9d
xorl %r8d, %r8d
.LBB11_23: # %._crit_edge
movq 48(%rsp), %rax # 8-byte Reload
movl %eax, (%rsp)
movq 32(%rsp), %r13 # 8-byte Reload
movq %r13, %rdi
movq %r15, %rsi
movq 24(%rsp), %r14 # 8-byte Reload
movq %r14, %rdx
movq 64(%rsp), %rbx # 8-byte Reload
movq %rbx, %rcx
callq _Z14serialFindHullP5PointS_S_P4Edgeiii
movl %eax, (%rsp)
movq 16(%rsp), %r15 # 8-byte Reload
movq %r15, %rdi
movq %r14, %rsi
movq 56(%rsp), %rdx # 8-byte Reload
movq %rbx, %rcx
movl %r12d, %r8d
movl 8(%rsp), %r9d # 4-byte Reload
callq _Z14serialFindHullP5PointS_S_P4Edgeiii
movl %eax, %ebp
movq %r13, %rdi
callq free
movq %r15, %rdi
callq free
movl %ebp, %eax
.LBB11_16: # %common.ret
addq $88, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end11:
.size _Z14serialFindHullP5PointS_S_P4Edgeiii, .Lfunc_end11-_Z14serialFindHullP5PointS_S_P4Edgeiii
.cfi_endproc
# -- End function
.globl _Z10serialHullP5PointP4Edgei # -- Begin function _Z10serialHullP5PointP4Edgei
.p2align 4, 0x90
.type _Z10serialHullP5PointP4Edgei,@function
_Z10serialHullP5PointP4Edgei: # @_Z10serialHullP5PointP4Edgei
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $72, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdi, %r12
movslq %edx, %rbx
movq -8(%rdi,%rbx,8), %rax
movq (%rdi), %rcx
movq %rcx, (%rsi)
movq %rsi, 48(%rsp) # 8-byte Spill
movq %rax, 8(%rsi)
leaq -16(,%rbx,8), %r15
movq %r15, %rdi
callq malloc
movq %rax, 40(%rsp) # 8-byte Spill
movq %r15, %rdi
callq malloc
movq %rax, 32(%rsp) # 8-byte Spill
movl $0, 12(%rsp) # 4-byte Folded Spill
movl $0, 8(%rsp) # 4-byte Folded Spill
movl $0, %r9d
movl $0, %r8d
movq %rbx, 24(%rsp) # 8-byte Spill
cmpl $4, %ebx
jl .LBB12_7
# %bb.1: # %.lr.ph
movq 24(%rsp), %rdx # 8-byte Reload
leal -2(%rdx), %ecx
movq (%r12), %rax
movq -8(%r12,%rdx,8), %rsi
movq %rax, %rdx
shrq $32, %rdx
movq %rsi, %r8
shrq $32, %r8
movl %esi, %edi
subl %eax, %edi
movl %r8d, %r10d
subl %edx, %r10d
movl %eax, %r9d
movq %rsi, 64(%rsp) # 8-byte Spill
subl %esi, %r9d
movl %r9d, 20(%rsp) # 4-byte Spill
movl %edx, %esi
movq %r8, 56(%rsp) # 8-byte Spill
subl %r8d, %esi
movl %esi, 16(%rsp) # 4-byte Spill
movl %ecx, %ebx
xorl %r8d, %r8d
movl $1, %ecx
xorl %r9d, %r9d
xorl %r11d, %r11d
movl $0, 8(%rsp) # 4-byte Folded Spill
movl $0, 12(%rsp) # 4-byte Folded Spill
xorl %r15d, %r15d
jmp .LBB12_2
.p2align 4, 0x90
.LBB12_3: # in Loop: Header=BB12_2 Depth=1
movslq %r8d, %r13
incl %r8d
movq 40(%rsp), %rbp # 8-byte Reload
movq %rsi, (%rbp,%r13,8)
cmpl %r11d, %r14d
cmovgl %r14d, %r11d
cmovgl %r13d, %r9d
.LBB12_6: # in Loop: Header=BB12_2 Depth=1
incq %rcx
cmpq %rcx, %rbx
je .LBB12_7
.LBB12_2: # =>This Inner Loop Header: Depth=1
movq (%r12,%rcx,8), %rsi
movq %rsi, %rbp
shrq $32, %rbp
movl %ebp, %r13d
subl %edx, %r13d
imull %edi, %r13d
movl %eax, %r14d
subl %esi, %r14d
imull %r10d, %r14d
addl %r13d, %r14d
testl %r14d, %r14d
jg .LBB12_3
# %bb.4: # in Loop: Header=BB12_2 Depth=1
subl 56(%rsp), %ebp # 4-byte Folded Reload
imull 20(%rsp), %ebp # 4-byte Folded Reload
movq 64(%rsp), %r14 # 8-byte Reload
# kill: def $r14d killed $r14d killed $r14
subl %esi, %r14d
imull 16(%rsp), %r14d # 4-byte Folded Reload
addl %ebp, %r14d
testl %r14d, %r14d
jle .LBB12_6
# %bb.5: # in Loop: Header=BB12_2 Depth=1
movl 8(%rsp), %ebp # 4-byte Reload
movslq %ebp, %r13
incl %ebp
movl %ebp, 8(%rsp) # 4-byte Spill
movq 32(%rsp), %rbp # 8-byte Reload
movq %rsi, (%rbp,%r13,8)
cmpl %r15d, %r14d
cmovgl %r14d, %r15d
movl 12(%rsp), %esi # 4-byte Reload
cmovgl %r13d, %esi
movl %esi, 12(%rsp) # 4-byte Spill
jmp .LBB12_6
.LBB12_7: # %._crit_edge
movq (%r12), %rsi
movq 24(%rsp), %r15 # 8-byte Reload
movq -8(%r12,%r15,8), %rdx
movl $1, (%rsp)
movq 40(%rsp), %r14 # 8-byte Reload
movq %r14, %rdi
movq 48(%rsp), %rbx # 8-byte Reload
movq %rbx, %rcx
callq _Z14serialFindHullP5PointS_S_P4Edgeiii
movq -8(%r12,%r15,8), %rcx
movq (%r12), %rdx
movslq %eax, %rsi
incl %eax
shlq $4, %rsi
movq %rcx, (%rbx,%rsi)
movq %rdx, 8(%rbx,%rsi)
movq -8(%r12,%r15,8), %rsi
movq (%r12), %rdx
movl %eax, (%rsp)
movq 32(%rsp), %r15 # 8-byte Reload
movq %r15, %rdi
movq %rbx, %rcx
movl 8(%rsp), %r8d # 4-byte Reload
movl 12(%rsp), %r9d # 4-byte Reload
callq _Z14serialFindHullP5PointS_S_P4Edgeiii
movl %eax, %ebx
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movl %ebx, %eax
addq $72, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end12:
.size _Z10serialHullP5PointP4Edgei, .Lfunc_end12-_Z10serialHullP5PointP4Edgei
.cfi_endproc
# -- End function
.globl _Z17doSerialQuickHullP5PointP4Edgei # -- Begin function _Z17doSerialQuickHullP5PointP4Edgei
.p2align 4, 0x90
.type _Z17doSerialQuickHullP5PointP4Edgei,@function
_Z17doSerialQuickHullP5PointP4Edgei: # @_Z17doSerialQuickHullP5PointP4Edgei
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $32, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %ebx
movq %rsi, %r14
movq %rdi, %r15
leaq 24(%rsp), %rdi
callq hipEventCreate
leaq 16(%rsp), %rdi
callq hipEventCreate
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
leal -1(%rbx), %edx
movq %r15, %rdi
xorl %esi, %esi
callq _Z18quickSortIterativeP5Pointii
movq %r15, %rdi
movq %r14, %rsi
movl %ebx, %edx
callq _Z10serialHullP5PointP4Edgei
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 24(%rsp), %rsi
movq 16(%rsp), %rdx
leaq 12(%rsp), %rdi
callq hipEventElapsedTime
movq stderr(%rip), %rdi
movss 12(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.5, %esi
movb $1, %al
callq fprintf
addq $32, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end13:
.size _Z17doSerialQuickHullP5PointP4Edgei, .Lfunc_end13-_Z17doSerialQuickHullP5PointP4Edgei
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $24, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq 8(%rsi), %rdi
movl $.L.str.6, %esi
callq fopen
movq %rax, %rbx
leaq 12(%rsp), %rdx
movl $.L.str.7, %esi
movq %rax, %rdi
xorl %eax, %eax
callq __isoc23_fscanf
movslq 12(%rsp), %r12
leaq (,%r12,8), %rdi
movq %r12, %r15
shlq $4, %r15
callq malloc
movq %rax, %r14
movq %r15, %rdi
callq malloc
movq %rax, %r15
testq %r12, %r12
jle .LBB14_3
# %bb.1: # %.lr.ph
leaq 20(%rsp), %r12
leaq 16(%rsp), %r13
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB14_2: # =>This Inner Loop Header: Depth=1
movl $.L.str.8, %esi
movq %rbx, %rdi
movq %r13, %rdx
movq %r12, %rcx
xorl %eax, %eax
callq __isoc23_fscanf
movq 16(%rsp), %rax
movq %rax, (%r14,%rbp,8)
incq %rbp
movslq 12(%rsp), %rax
cmpq %rax, %rbp
jl .LBB14_2
.LBB14_3: # %._crit_edge
movq stderr(%rip), %rcx
movl $.L.str.9, %edi
movl $11, %esi
movl $1, %edx
callq fwrite@PLT
movl 12(%rsp), %edx
movq %r14, %rdi
movq %r15, %rsi
callq _Z17doSerialQuickHullP5PointP4Edgei
movq stderr(%rip), %rcx
movl $.L.str.10, %edi
movl $21, %esi
movl $1, %edx
callq fwrite@PLT
xorl %eax, %eax
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end14:
.size main, .Lfunc_end14-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB15_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB15_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9quickhullP5PointS0_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end15:
.size __hip_module_ctor, .Lfunc_end15-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB16_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB16_2:
retq
.Lfunc_end16:
.size __hip_module_dtor, .Lfunc_end16-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9quickhullP5PointS0_i,@object # @_Z9quickhullP5PointS0_i
.section .rodata,"a",@progbits
.globl _Z9quickhullP5PointS0_i
.p2align 3, 0x0
_Z9quickhullP5PointS0_i:
.quad _Z24__device_stub__quickhullP5PointS0_i
.size _Z9quickhullP5PointS0_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%s\n"
.size .L.str, 4
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%d,%d->%d,%d%s"
.size .L.str.1, 15
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\n"
.size .L.str.2, 2
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz ", "
.size .L.str.3, 3
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "%d %d\n"
.size .L.str.4, 7
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "serial quickhull runtime: %f ms\n"
.size .L.str.5, 33
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "r"
.size .L.str.6, 2
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "%d"
.size .L.str.7, 3
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "%d %d"
.size .L.str.8, 6
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "Read input\n"
.size .L.str.9, 12
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Quick Hull completed\n"
.size .L.str.10, 22
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9quickhullP5PointS0_i"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__quickhullP5PointS0_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9quickhullP5PointS0_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : _Z9quickhullP5PointS0_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0020*/ BRA 0x20; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0030*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0040*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0050*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0060*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0070*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0080*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0090*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9quickhullP5PointS0_i
.globl _Z9quickhullP5PointS0_i
.p2align 8
.type _Z9quickhullP5PointS0_i,@function
_Z9quickhullP5PointS0_i:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9quickhullP5PointS0_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9quickhullP5PointS0_i, .Lfunc_end0-_Z9quickhullP5PointS0_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9quickhullP5PointS0_i
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z9quickhullP5PointS0_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#include "includes.h"
__global__ void TgvComputeOpticalFlowVectorKernel(const float *u, const float2 *tv2, int width, int height, int stride, float2 *warpUV)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float us = u[pos];
float2 tv2s = tv2[pos];
warpUV[pos].x = us * tv2s.x;
warpUV[pos].y = us * tv2s.y;
}
|
code for sm_80
Function : _Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e680000002100 */
/*0040*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e620000002500 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x174], PT ; /* 0x00005d0003007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R5, c[0x0][0x0], R0 ; /* 0x0000000005007a24 */
/* 0x002fca00078e0200 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */
/* 0x000fe20000706670 */
/*0090*/ IMAD R0, R3, c[0x0][0x178], R0 ; /* 0x00005e0003007a24 */
/* 0x000fd800078e0200 */
/*00a0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00b0*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*00c0*/ MOV R7, 0x8 ; /* 0x0000000800077802 */
/* 0x000fe20000000f00 */
/*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc80000000a00 */
/*00e0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0207 */
/*00f0*/ IMAD.WIDE R2, R0.reuse, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x040fe400078e0203 */
/*0100*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1b00 */
/*0110*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0120*/ IMAD.WIDE R6, R0, R7, c[0x0][0x180] ; /* 0x0000600000067625 */
/* 0x000fc800078e0207 */
/*0130*/ FMUL R9, R5, R2.reuse ; /* 0x0000000205097220 */
/* 0x084fe40000400000 */
/*0140*/ FMUL R8, R4, R2 ; /* 0x0000000204087220 */
/* 0x000fca0000400000 */
/*0150*/ STG.E.64 [R6.64], R8 ; /* 0x0000000806007986 */
/* 0x000fe2000c101b04 */
/*0160*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0170*/ BRA 0x170; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#include "includes.h"
__global__ void TgvComputeOpticalFlowVectorKernel(const float *u, const float2 *tv2, int width, int height, int stride, float2 *warpUV)
{
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
const int pos = ix + iy * stride;
if (ix >= width || iy >= height) return;
float us = u[pos];
float2 tv2s = tv2[pos];
warpUV[pos].x = us * tv2s.x;
warpUV[pos].y = us * tv2s.y;
}
|
.file "tmpxft_001b246f_00000000-6_TgvComputeOpticalFlowVectorKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z70__device_stub__Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_PKfPK6float2iiiPS1_
.type _Z70__device_stub__Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_PKfPK6float2iiiPS1_, @function
_Z70__device_stub__Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_PKfPK6float2iiiPS1_:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movl %edx, 28(%rsp)
movl %ecx, 24(%rsp)
movl %r8d, 20(%rsp)
movq %r9, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 28(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 20(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z70__device_stub__Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_PKfPK6float2iiiPS1_, .-_Z70__device_stub__Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_PKfPK6float2iiiPS1_
.globl _Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_
.type _Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_, @function
_Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z70__device_stub__Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_PKfPK6float2iiiPS1_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_, .-_Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include "includes.h"
// Per-pixel optical-flow vector: warpUV[pos] = u[pos] * tv2[pos] (componentwise).
// Expects a 2D launch covering width x height; `stride` is the row pitch in
// elements — NOTE(review): assumed to be the same pitch for u, tv2 and warpUV;
// confirm against callers.
__global__ void TgvComputeOpticalFlowVectorKernel(const float *u, const float2 *tv2, int width, int height, int stride, float2 *warpUV)
{
// Global 2D pixel coordinates of this thread.
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
// Linear index into the pitched buffers.
const int pos = ix + iy * stride;
// Bounds guard: the grid may overshoot the image on both axes.
if (ix >= width || iy >= height) return;
float us = u[pos];
float2 tv2s = tv2[pos];
// Scale the direction vector by the scalar field value.
warpUV[pos].x = us * tv2s.x;
warpUV[pos].y = us * tv2s.y;
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
// HIP port: per-pixel optical-flow vector, warpUV[pos] = u[pos] * tv2[pos]
// (componentwise). Expects a 2D launch covering width x height; `stride` is
// the row pitch in elements — NOTE(review): assumed identical for all three
// buffers; confirm against callers.
__global__ void TgvComputeOpticalFlowVectorKernel(const float *u, const float2 *tv2, int width, int height, int stride, float2 *warpUV)
{
// Global 2D pixel coordinates of this thread.
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
// Linear index into the pitched buffers.
const int pos = ix + iy * stride;
// Bounds guard: the grid may overshoot the image on both axes.
if (ix >= width || iy >= height) return;
float us = u[pos];
float2 tv2s = tv2[pos];
// Scale the direction vector by the scalar field value.
warpUV[pos].x = us * tv2s.x;
warpUV[pos].y = us * tv2s.y;
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
#include "includes.h"
// HIP port: per-pixel optical-flow vector, warpUV[pos] = u[pos] * tv2[pos]
// (componentwise). Expects a 2D launch covering width x height; `stride` is
// the row pitch in elements — NOTE(review): assumed identical for all three
// buffers; confirm against callers.
__global__ void TgvComputeOpticalFlowVectorKernel(const float *u, const float2 *tv2, int width, int height, int stride, float2 *warpUV)
{
// Global 2D pixel coordinates of this thread.
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
// Linear index into the pitched buffers.
const int pos = ix + iy * stride;
// Bounds guard: the grid may overshoot the image on both axes.
if (ix >= width || iy >= height) return;
float us = u[pos];
float2 tv2s = tv2[pos];
// Scale the direction vector by the scalar field value.
warpUV[pos].x = us * tv2s.x;
warpUV[pos].y = us * tv2s.y;
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.globl _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.p2align 8
.type _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_,@function
_Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x34
s_load_b64 s[2:3], s[0:1], 0x10
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s4, 0xffff
s_lshr_b32 s4, s4, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s5, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s4, v[3:4]
v_cmp_gt_i32_e32 vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s3, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_clause 0x2
s_load_b32 s2, s[0:1], 0x18
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x20
s_waitcnt lgkmcnt(0)
v_mad_u64_u32 v[2:3], null, v1, s2, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
v_lshlrev_b64 v[2:3], 3, v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v4, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s0, v2
global_load_b32 v6, v[0:1], off
global_load_b64 v[0:1], v[4:5], off
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
s_waitcnt vmcnt(0)
v_mul_f32_e32 v0, v6, v0
v_mul_f32_e32 v1, v6, v1
global_store_b64 v[2:3], v[0:1], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_, .Lfunc_end0-_Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
#include "includes.h"
// HIP port: per-pixel optical-flow vector, warpUV[pos] = u[pos] * tv2[pos]
// (componentwise). Expects a 2D launch covering width x height; `stride` is
// the row pitch in elements — NOTE(review): assumed identical for all three
// buffers; confirm against callers.
__global__ void TgvComputeOpticalFlowVectorKernel(const float *u, const float2 *tv2, int width, int height, int stride, float2 *warpUV)
{
// Global 2D pixel coordinates of this thread.
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
const int iy = threadIdx.y + blockIdx.y * blockDim.y;
// Linear index into the pitched buffers.
const int pos = ix + iy * stride;
// Bounds guard: the grid may overshoot the image on both axes.
if (ix >= width || iy >= height) return;
float us = u[pos];
float2 tv2s = tv2[pos];
// Scale the direction vector by the scalar field value.
warpUV[pos].x = us * tv2s.x;
warpUV[pos].y = us * tv2s.y;
}
|
.text
.file "TgvComputeOpticalFlowVectorKernel.hip"
.globl _Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_ # -- Begin function _Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.p2align 4, 0x90
.type _Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_,@function
_Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_: # @_Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %r9, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_, .Lfunc_end0-_Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_,@object # @_Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.section .rodata,"a",@progbits
.globl _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.p2align 3, 0x0
_Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_:
.quad _Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.size _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_"
.size .L__unnamed_1, 74
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : _Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e280000002600 */
/*0020*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e280000002200 */
/*0030*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e680000002100 */
/*0040*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e620000002500 */
/*0050*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x174], PT ; /* 0x00005d0003007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R5, c[0x0][0x0], R0 ; /* 0x0000000005007a24 */
/* 0x002fca00078e0200 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x170], P0 ; /* 0x00005c0000007a0c */
/* 0x000fe20000706670 */
/*0090*/ IMAD R0, R3, c[0x0][0x178], R0 ; /* 0x00005e0003007a24 */
/* 0x000fd800078e0200 */
/*00a0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00b0*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*00c0*/ MOV R7, 0x8 ; /* 0x0000000800077802 */
/* 0x000fe20000000f00 */
/*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc80000000a00 */
/*00e0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x168] ; /* 0x00005a0000047625 */
/* 0x000fc800078e0207 */
/*00f0*/ IMAD.WIDE R2, R0.reuse, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x040fe400078e0203 */
/*0100*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1b00 */
/*0110*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*0120*/ IMAD.WIDE R6, R0, R7, c[0x0][0x180] ; /* 0x0000600000067625 */
/* 0x000fc800078e0207 */
/*0130*/ FMUL R9, R5, R2.reuse ; /* 0x0000000205097220 */
/* 0x084fe40000400000 */
/*0140*/ FMUL R8, R4, R2 ; /* 0x0000000204087220 */
/* 0x000fca0000400000 */
/*0150*/ STG.E.64 [R6.64], R8 ; /* 0x0000000806007986 */
/* 0x000fe2000c101b04 */
/*0160*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0170*/ BRA 0x170; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.globl _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.p2align 8
.type _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_,@function
_Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x34
s_load_b64 s[2:3], s[0:1], 0x10
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v3, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s4, 0xffff
s_lshr_b32 s4, s4, 16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, s14, s5, v[2:3]
v_mad_u64_u32 v[1:2], null, s15, s4, v[3:4]
v_cmp_gt_i32_e32 vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s3, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_clause 0x2
s_load_b32 s2, s[0:1], 0x18
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x20
s_waitcnt lgkmcnt(0)
v_mad_u64_u32 v[2:3], null, v1, s2, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
v_lshlrev_b64 v[2:3], 3, v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v4, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v3, vcc_lo
v_add_co_u32 v2, vcc_lo, s0, v2
global_load_b32 v6, v[0:1], off
global_load_b64 v[0:1], v[4:5], off
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
s_waitcnt vmcnt(0)
v_mul_f32_e32 v0, v6, v0
v_mul_f32_e32 v1, v6, v1
global_store_b64 v[2:3], v[0:1], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_, .Lfunc_end0-_Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_001b246f_00000000-6_TgvComputeOpticalFlowVectorKernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z70__device_stub__Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_PKfPK6float2iiiPS1_
.type _Z70__device_stub__Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_PKfPK6float2iiiPS1_, @function
_Z70__device_stub__Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_PKfPK6float2iiiPS1_:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movl %edx, 28(%rsp)
movl %ecx, 24(%rsp)
movl %r8d, 20(%rsp)
movq %r9, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 28(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 20(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z70__device_stub__Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_PKfPK6float2iiiPS1_, .-_Z70__device_stub__Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_PKfPK6float2iiiPS1_
.globl _Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_
.type _Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_, @function
_Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z70__device_stub__Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_PKfPK6float2iiiPS1_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_, .-_Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z33TgvComputeOpticalFlowVectorKernelPKfPK6float2iiiPS1_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "TgvComputeOpticalFlowVectorKernel.hip"
.globl _Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_ # -- Begin function _Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.p2align 4, 0x90
.type _Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_,@function
_Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_: # @_Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %r9, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_, .Lfunc_end0-_Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_,@object # @_Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.section .rodata,"a",@progbits
.globl _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.p2align 3, 0x0
_Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_:
.quad _Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.size _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_"
.size .L__unnamed_1, 74
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z48__device_stub__TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z33TgvComputeOpticalFlowVectorKernelPKfPK15HIP_vector_typeIfLj2EEiiiPS2_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#include <iostream>
// Element-wise vector add: one single-thread block per element, so the
// block index doubles as the element index. Caller must launch with
// gridDim.x == element count and blockDim.x == 1.
__global__ void add(int*a, int*b, int*c)
{
    const unsigned int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
// Fill a[0..N) with pseudo-random values in [0, 1000).
// Uses rand(), so results depend on the current srand() seed.
void random_ints(int* a, int N)
{
    for (int i = 0; i < N; ++i)
        a[i] = rand() % 1000;
}
#define N 1000000000
int main(void){
    int *a,*b,*c;         //host buffers
    int *d_a, *d_b, *d_c; //device buffers
    // FIX: N*sizeof(int) = 4e9 overflows a signed 32-bit int (the byte
    // count went negative); compute the allocation size in size_t.
    size_t size = (size_t)N * sizeof(int);
    //allocate space on device for a, b and c
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, size);
    a = (int *)malloc(size);
    random_ints(a, N);
    b = (int *)malloc(size);
    random_ints(b, N);
    c = (int *)malloc(size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // Launch one single-thread block per element (matches the kernel's
    // blockIdx.x indexing; gridDim.x max is 2^31-1, so N=1e9 is legal).
    add<<<N,1>>>(d_a,d_b,d_c);
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // Surface any launch/execution error from the kernel or copies.
    cudaError_t error=cudaGetLastError();
    if(error!=cudaSuccess){
        printf("Error: %s\n",cudaGetErrorString(error));
    }
    std::cout<<"done\n";
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
code for sm_80
Function : _Z3addPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE.U32 R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0007 */
/*0050*/ IMAD.WIDE.U32 R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0007 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0007 */
/*0090*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#include <iostream>
// Element-wise vector add: one single-thread block per element, so the
// block index doubles as the element index. Caller must launch with
// gridDim.x == element count and blockDim.x == 1.
__global__ void add(int*a, int*b, int*c)
{
    const unsigned int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
// Fill a[0..N) with pseudo-random values in [0, 1000).
// Uses rand(), so results depend on the current srand() seed.
void random_ints(int* a, int N)
{
    for (int i = 0; i < N; ++i)
        a[i] = rand() % 1000;
}
#define N 1000000000
int main(void){
    int *a,*b,*c;         //host buffers
    int *d_a, *d_b, *d_c; //device buffers
    // FIX: N*sizeof(int) = 4e9 overflows a signed 32-bit int (the byte
    // count went negative); compute the allocation size in size_t.
    size_t size = (size_t)N * sizeof(int);
    //allocate space on device for a, b and c
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, size);
    a = (int *)malloc(size);
    random_ints(a, N);
    b = (int *)malloc(size);
    random_ints(b, N);
    c = (int *)malloc(size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // Launch one single-thread block per element (matches the kernel's
    // blockIdx.x indexing; gridDim.x max is 2^31-1, so N=1e9 is legal).
    add<<<N,1>>>(d_a,d_b,d_c);
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // Surface any launch/execution error from the kernel or copies.
    cudaError_t error=cudaGetLastError();
    if(error!=cudaSuccess){
        printf("Error: %s\n",cudaGetErrorString(error));
    }
    std::cout<<"done\n";
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
.file "tmpxft_00172314_00000000-6_vector_add_blocks.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z11random_intsPii
.type _Z11random_intsPii, @function
_Z11random_intsPii:
.LFB3669:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L5:
call rand@PLT
movslq %eax, %rdx
imulq $274877907, %rdx, %rdx
sarq $38, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $1000, %edx, %edx
subl %edx, %eax
movl %eax, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE3669:
.size _Z11random_intsPii, .-_Z11random_intsPii
.globl _Z26__device_stub__Z3addPiS_S_PiS_S_
.type _Z26__device_stub__Z3addPiS_S_PiS_S_, @function
_Z26__device_stub__Z3addPiS_S_PiS_S_:
.LFB3695:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z26__device_stub__Z3addPiS_S_PiS_S_, .-_Z26__device_stub__Z3addPiS_S_PiS_S_
.globl _Z3addPiS_S_
.type _Z3addPiS_S_, @function
_Z3addPiS_S_:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z3addPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _Z3addPiS_S_, .-_Z3addPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Error: %s\n"
.LC1:
.string "done\n"
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $64, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
movq $-294967296, %rsi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movq $-294967296, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq $-294967296, %rsi
call cudaMalloc@PLT
movq $-294967296, %rdi
call malloc@PLT
movq %rax, %rbp
movl $1000000000, %esi
movq %rax, %rdi
call _Z11random_intsPii
movq $-294967296, %rdi
call malloc@PLT
movq %rax, %rbx
movl $1000000000, %esi
movq %rax, %rdi
call _Z11random_intsPii
movq $-294967296, %rdi
call malloc@PLT
movq %rax, %r12
movl $1, %ecx
movq $-294967296, %rdx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq $-294967296, %rdx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1000000000, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L24
.L20:
movl $2, %ecx
movq $-294967296, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
call cudaDeviceSynchronize@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L25
.L21:
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L26
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z26__device_stub__Z3addPiS_S_PiS_S_
jmp .L20
.L25:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L21
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z3addPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include <iostream>
// Element-wise vector add: one single-thread block per element, so the
// block index doubles as the element index. Caller must launch with
// gridDim.x == element count and blockDim.x == 1.
__global__ void add(int*a, int*b, int*c)
{
    const unsigned int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
// Fill a[0..N) with pseudo-random values in [0, 1000).
// Uses rand(), so results depend on the current srand() seed.
void random_ints(int* a, int N)
{
    for (int i = 0; i < N; ++i)
        a[i] = rand() % 1000;
}
#define N 1000000000
int main(void){
    int *a,*b,*c;         //host buffers
    int *d_a, *d_b, *d_c; //device buffers
    // FIX: N*sizeof(int) = 4e9 overflows a signed 32-bit int (the byte
    // count went negative); compute the allocation size in size_t.
    size_t size = (size_t)N * sizeof(int);
    //allocate space on device for a, b and c
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, size);
    a = (int *)malloc(size);
    random_ints(a, N);
    b = (int *)malloc(size);
    random_ints(b, N);
    c = (int *)malloc(size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // Launch one single-thread block per element (matches the kernel's
    // blockIdx.x indexing; gridDim.x max is 2^31-1, so N=1e9 is legal).
    add<<<N,1>>>(d_a,d_b,d_c);
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // Surface any launch/execution error from the kernel or copies.
    cudaError_t error=cudaGetLastError();
    if(error!=cudaSuccess){
        printf("Error: %s\n",cudaGetErrorString(error));
    }
    std::cout<<"done\n";
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
#include <hip/hip_runtime.h>
#include <iostream>
// Element-wise vector add (HIP): one single-thread block per element, so
// the block index doubles as the element index. Caller must launch with
// gridDim.x == element count and blockDim.x == 1.
__global__ void add(int*a, int*b, int*c)
{
    const unsigned int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
// Fill a[0..N) with pseudo-random values in [0, 1000).
// Uses rand(), so results depend on the current srand() seed.
void random_ints(int* a, int N)
{
    for (int i = 0; i < N; ++i)
        a[i] = rand() % 1000;
}
#define N 1000000000
int main(void){
int *a,*b,*c; //host variables
int *d_a, *d_b, *d_c; //device variables
int size=N*sizeof(int);
//allocate space on device for a,b and c
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, size);
a = (int *)malloc(size);
random_ints(a, N);
b = (int *)malloc(size);
random_ints(b, N);
c = (int *)malloc(size);
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
//launch kernel for N blocks
add<<<N,1>>>(d_a,d_b,d_c);
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipError_t error=hipGetLastError();
if(error!=hipSuccess){
printf("Error: %s\n",hipGetErrorString(error));
}
/*for (int i=0;i<N;i++)
{
std::cout<<a[i]<<"+"<<b[i]<<"="<<c[i]<<std::endl;
}
*/
std::cout<<"done\n";
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
#include <iostream>
// Element-wise vector add (HIP): one single-thread block per element, so
// the block index doubles as the element index. Caller must launch with
// gridDim.x == element count and blockDim.x == 1.
__global__ void add(int*a, int*b, int*c)
{
    const unsigned int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
// Fill a[0..N) with pseudo-random values in [0, 1000).
// Uses rand(), so results depend on the current srand() seed.
void random_ints(int* a, int N)
{
    for (int i = 0; i < N; ++i)
        a[i] = rand() % 1000;
}
#define N 1000000000
int main(void){
int *a,*b,*c; //host variables
int *d_a, *d_b, *d_c; //device variables
int size=N*sizeof(int);
//allocate space on device for a,b and c
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, size);
a = (int *)malloc(size);
random_ints(a, N);
b = (int *)malloc(size);
random_ints(b, N);
c = (int *)malloc(size);
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
//launch kernel for N blocks
add<<<N,1>>>(d_a,d_b,d_c);
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipError_t error=hipGetLastError();
if(error!=hipSuccess){
printf("Error: %s\n",hipGetErrorString(error));
}
/*for (int i=0;i<N;i++)
{
std::cout<<a[i]<<"+"<<b[i]<<"="<<c[i]<<std::endl;
}
*/
std::cout<<"done\n";
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPiS_S_
.globl _Z3addPiS_S_
.p2align 8
.type _Z3addPiS_S_,@function
_Z3addPiS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
s_mov_b32 s2, s15
s_mov_b32 s3, 0
s_load_b64 s[0:1], s[0:1], 0x10
s_lshl_b64 s[2:3], s[2:3], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s4, s4, s2
s_addc_u32 s5, s5, s3
s_add_u32 s6, s6, s2
s_addc_u32 s7, s7, s3
s_load_b32 s4, s[4:5], 0x0
s_load_b32 s5, s[6:7], 0x0
s_waitcnt lgkmcnt(0)
s_add_i32 s4, s5, s4
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 16
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPiS_S_, .Lfunc_end0-_Z3addPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 16
.sgpr_spill_count: 0
.symbol: _Z3addPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
#include <iostream>
// Element-wise vector add (HIP): one single-thread block per element, so
// the block index doubles as the element index. Caller must launch with
// gridDim.x == element count and blockDim.x == 1.
__global__ void add(int*a, int*b, int*c)
{
    const unsigned int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
// Fill a[0..N) with pseudo-random values in [0, 1000).
// Uses rand(), so results depend on the current srand() seed.
void random_ints(int* a, int N)
{
    for (int i = 0; i < N; ++i)
        a[i] = rand() % 1000;
}
#define N 1000000000
int main(void){
int *a,*b,*c; //host variables
int *d_a, *d_b, *d_c; //device variables
int size=N*sizeof(int);
//allocate space on device for a,b and c
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, size);
a = (int *)malloc(size);
random_ints(a, N);
b = (int *)malloc(size);
random_ints(b, N);
c = (int *)malloc(size);
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
//launch kernel for N blocks
add<<<N,1>>>(d_a,d_b,d_c);
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipError_t error=hipGetLastError();
if(error!=hipSuccess){
printf("Error: %s\n",hipGetErrorString(error));
}
/*for (int i=0;i<N;i++)
{
std::cout<<a[i]<<"+"<<b[i]<<"="<<c[i]<<std::endl;
}
*/
std::cout<<"done\n";
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
|
.text
.file "vector_add_blocks.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__addPiS_S_ # -- Begin function _Z18__device_stub__addPiS_S_
.p2align 4, 0x90
.type _Z18__device_stub__addPiS_S_,@function
_Z18__device_stub__addPiS_S_: # @_Z18__device_stub__addPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addPiS_S_, .Lfunc_end0-_Z18__device_stub__addPiS_S_
.cfi_endproc
# -- End function
.globl _Z11random_intsPii # -- Begin function _Z11random_intsPii
.p2align 4, 0x90
.type _Z11random_intsPii,@function
_Z11random_intsPii: # @_Z11random_intsPii
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $274877907, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $38, %rcx
addl %edx, %ecx
imull $1000, %ecx, %ecx # imm = 0x3E8
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB1_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB1_4: # %._crit_edge
retq
.Lfunc_end1:
.size _Z11random_intsPii, .Lfunc_end1-_Z11random_intsPii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $128, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 16(%rsp), %rdi
movq $-294967296, %rsi # imm = 0xEE6B2800
callq hipMalloc
leaq 8(%rsp), %rdi
movq $-294967296, %rsi # imm = 0xEE6B2800
callq hipMalloc
movq %rsp, %rdi
movq $-294967296, %rsi # imm = 0xEE6B2800
callq hipMalloc
movq $-294967296, %rdi # imm = 0xEE6B2800
callq malloc
movq %rax, %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $274877907, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $38, %rcx
addl %edx, %ecx
imull $1000, %ecx, %ecx # imm = 0x3E8
subl %ecx, %eax
movl %eax, (%rbx,%r14,4)
incq %r14
cmpq $1000000000, %r14 # imm = 0x3B9ACA00
jne .LBB2_1
# %bb.2: # %_Z11random_intsPii.exit
movq $-294967296, %rdi # imm = 0xEE6B2800
callq malloc
movq %rax, %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_3: # %.lr.ph.i21
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $274877907, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $38, %rcx
addl %edx, %ecx
imull $1000, %ecx, %ecx # imm = 0x3E8
subl %ecx, %eax
movl %eax, (%r14,%r15,4)
incq %r15
cmpq $1000000000, %r15 # imm = 0x3B9ACA00
jne .LBB2_3
# %bb.4: # %_Z11random_intsPii.exit25
movq $-294967296, %rdi # imm = 0xEE6B2800
callq malloc
movq %rax, %r15
movq 16(%rsp), %rdi
movq %rbx, %rsi
movq $-294967296, %rdx # imm = 0xEE6B2800
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movq %r14, %rsi
movq $-294967296, %rdx # imm = 0xEE6B2800
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdx # imm = 0x100000001
leaq 999999999(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_6
# %bb.5:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_6:
movq (%rsp), %rsi
movq %r15, %rdi
movq $-294967296, %rdx # imm = 0xEE6B2800
movl $2, %ecx
callq hipMemcpy
callq hipDeviceSynchronize
callq hipGetLastError
testl %eax, %eax
je .LBB2_8
# %bb.7:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
.LBB2_8:
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $5, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $128, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPiS_S_,@object # @_Z3addPiS_S_
.section .rodata,"a",@progbits
.globl _Z3addPiS_S_
.p2align 3, 0x0
_Z3addPiS_S_:
.quad _Z18__device_stub__addPiS_S_
.size _Z3addPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error: %s\n"
.size .L.str, 11
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "done\n"
.size .L.str.1, 6
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPiS_S_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPiS_S_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : _Z3addPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE.U32 R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0007 */
/*0050*/ IMAD.WIDE.U32 R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0007 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0007 */
/*0090*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPiS_S_
.globl _Z3addPiS_S_
.p2align 8
.type _Z3addPiS_S_,@function
_Z3addPiS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
s_mov_b32 s2, s15
s_mov_b32 s3, 0
s_load_b64 s[0:1], s[0:1], 0x10
s_lshl_b64 s[2:3], s[2:3], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s4, s4, s2
s_addc_u32 s5, s5, s3
s_add_u32 s6, s6, s2
s_addc_u32 s7, s7, s3
s_load_b32 s4, s[4:5], 0x0
s_load_b32 s5, s[6:7], 0x0
s_waitcnt lgkmcnt(0)
s_add_i32 s4, s5, s4
s_delay_alu instid0(SALU_CYCLE_1)
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s4
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 16
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPiS_S_, .Lfunc_end0-_Z3addPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 16
.sgpr_spill_count: 0
.symbol: _Z3addPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_00172314_00000000-6_vector_add_blocks.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z11random_intsPii
.type _Z11random_intsPii, @function
_Z11random_intsPii:
.LFB3669:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L5:
call rand@PLT
movslq %eax, %rdx
imulq $274877907, %rdx, %rdx
sarq $38, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $1000, %edx, %edx
subl %edx, %eax
movl %eax, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE3669:
.size _Z11random_intsPii, .-_Z11random_intsPii
.globl _Z26__device_stub__Z3addPiS_S_PiS_S_
.type _Z26__device_stub__Z3addPiS_S_PiS_S_, @function
_Z26__device_stub__Z3addPiS_S_PiS_S_:
.LFB3695:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z26__device_stub__Z3addPiS_S_PiS_S_, .-_Z26__device_stub__Z3addPiS_S_PiS_S_
.globl _Z3addPiS_S_
.type _Z3addPiS_S_, @function
_Z3addPiS_S_:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z3addPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _Z3addPiS_S_, .-_Z3addPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Error: %s\n"
.LC1:
.string "done\n"
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $64, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rdi
movq $-294967296, %rsi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movq $-294967296, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq $-294967296, %rsi
call cudaMalloc@PLT
movq $-294967296, %rdi
call malloc@PLT
movq %rax, %rbp
movl $1000000000, %esi
movq %rax, %rdi
call _Z11random_intsPii
movq $-294967296, %rdi
call malloc@PLT
movq %rax, %rbx
movl $1000000000, %esi
movq %rax, %rdi
call _Z11random_intsPii
movq $-294967296, %rdi
call malloc@PLT
movq %rax, %r12
movl $1, %ecx
movq $-294967296, %rdx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq $-294967296, %rdx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1000000000, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L24
.L20:
movl $2, %ecx
movq $-294967296, %rdx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
call cudaDeviceSynchronize@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L25
.L21:
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L26
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z26__device_stub__Z3addPiS_S_PiS_S_
jmp .L20
.L25:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L21
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z3addPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "vector_add_blocks.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z18__device_stub__addPiS_S_ # -- Begin function _Z18__device_stub__addPiS_S_
.p2align 4, 0x90
.type _Z18__device_stub__addPiS_S_,@function
_Z18__device_stub__addPiS_S_: # @_Z18__device_stub__addPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addPiS_S_, .Lfunc_end0-_Z18__device_stub__addPiS_S_
.cfi_endproc
# -- End function
.globl _Z11random_intsPii # -- Begin function _Z11random_intsPii
.p2align 4, 0x90
.type _Z11random_intsPii,@function
_Z11random_intsPii: # @_Z11random_intsPii
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $274877907, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $38, %rcx
addl %edx, %ecx
imull $1000, %ecx, %ecx # imm = 0x3E8
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB1_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB1_4: # %._crit_edge
retq
.Lfunc_end1:
.size _Z11random_intsPii, .Lfunc_end1-_Z11random_intsPii
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $128, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 16(%rsp), %rdi
movq $-294967296, %rsi # imm = 0xEE6B2800
callq hipMalloc
leaq 8(%rsp), %rdi
movq $-294967296, %rsi # imm = 0xEE6B2800
callq hipMalloc
movq %rsp, %rdi
movq $-294967296, %rsi # imm = 0xEE6B2800
callq hipMalloc
movq $-294967296, %rdi # imm = 0xEE6B2800
callq malloc
movq %rax, %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $274877907, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $38, %rcx
addl %edx, %ecx
imull $1000, %ecx, %ecx # imm = 0x3E8
subl %ecx, %eax
movl %eax, (%rbx,%r14,4)
incq %r14
cmpq $1000000000, %r14 # imm = 0x3B9ACA00
jne .LBB2_1
# %bb.2: # %_Z11random_intsPii.exit
movq $-294967296, %rdi # imm = 0xEE6B2800
callq malloc
movq %rax, %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB2_3: # %.lr.ph.i21
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $274877907, %rax, %rcx # imm = 0x10624DD3
movq %rcx, %rdx
shrq $63, %rdx
sarq $38, %rcx
addl %edx, %ecx
imull $1000, %ecx, %ecx # imm = 0x3E8
subl %ecx, %eax
movl %eax, (%r14,%r15,4)
incq %r15
cmpq $1000000000, %r15 # imm = 0x3B9ACA00
jne .LBB2_3
# %bb.4: # %_Z11random_intsPii.exit25
movq $-294967296, %rdi # imm = 0xEE6B2800
callq malloc
movq %rax, %r15
movq 16(%rsp), %rdi
movq %rbx, %rsi
movq $-294967296, %rdx # imm = 0xEE6B2800
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movq %r14, %rsi
movq $-294967296, %rdx # imm = 0xEE6B2800
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdx # imm = 0x100000001
leaq 999999999(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_6
# %bb.5:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_6:
movq (%rsp), %rsi
movq %r15, %rdi
movq $-294967296, %rdx # imm = 0xEE6B2800
movl $2, %ecx
callq hipMemcpy
callq hipDeviceSynchronize
callq hipGetLastError
testl %eax, %eax
je .LBB2_8
# %bb.7:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
.LBB2_8:
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $5, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $128, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPiS_S_,@object # @_Z3addPiS_S_
.section .rodata,"a",@progbits
.globl _Z3addPiS_S_
.p2align 3, 0x0
_Z3addPiS_S_:
.quad _Z18__device_stub__addPiS_S_
.size _Z3addPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error: %s\n"
.size .L.str, 11
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "done\n"
.size .L.str.1, 6
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPiS_S_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPiS_S_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#include <sstream>
#include <fstream>
#include <set>
#include <iostream>
#include <map>
#include <vector>
#include <iostream>
#include <bits/stdc++.h>
#include <sstream>
#include <ctime>
#include <cstdint>
#include <stdint.h>
#define SQUEEZE 32
#define DELIMITR " "
#define IS_CHANGED 1
#define NOT_CHANGED 0
#define BLOCK_SIZE 32
using namespace std;
using String = std::string;

// Context-free grammar in (weak) Chomsky normal form, parsed from a text file
// where every line is either "A B C" (double production A -> B C, two
// delimiters) or "A a" (unary production A -> a, one delimiter).
// Besides the forward production maps it maintains the reverse maps required
// by the matrix-based CFPQ closure.
class Grammar {
public:
    std::set<String> nonterminalSet;
    std::set<String> terminalSet;
    // head nonterminal -> set of (left, right) production bodies
    std::map<String, std::set<std::pair<String, String >>> productionsDouble;
    // (left, right) production body -> set of head nonterminals
    std::map<std::pair<String, String>, std::set<String >> reverseProductionsDouble;
    std::map<String, std::set<String>> productionsUnary;        // NonTerminal -> set of Terminal
    std::map<String, std::set<String>> reverseProductionsUnary; // Terminal -> set of NonTerminal
    // NonTerminal -> nonterminals that appear next to it in some production body
    std::map<String, std::set<String>> nonTerminalToBodyOfProduction;

    // Read the grammar file line by line, then build the reverse lookup maps.
    // A missing file is silently ignored (the grammar stays empty); a
    // malformed line makes process_grammar_line throw a const char* message.
    void parse_grammar(const String &filename, const String &delimiter = " ") {
        std::ifstream file(filename);
        if (file.is_open()) {
            std::string line;
            while (getline(file, line)) {
                process_grammar_line(line, delimiter);
            }
            file.close();
        }
        make_reverse_relations();
        make_nonTerminalToBodyOfProduction();
    }
private:
    // Build reverseProductionsUnary and reverseProductionsDouble from the
    // forward maps.
    void make_reverse_relations() {
        make_unary_reverse_relation();
        make_double_reverse_relation();
    }
    // Split one grammar line on the delimiter and record it as a unary
    // (1 delimiter) or double (2 delimiters) production; anything else throws.
    void process_grammar_line(String line, const String &delimiter = " ") {
        size_t pos = 0;
        std::string token[2];
        int c = 0;
        while ((pos = line.find(delimiter)) != std::string::npos) {
            if (c >= 2) {
                // Guard: more than two delimiters used to write past token[2]
                // (undefined behavior); reject the line instead.
                throw "Error while process line from grammar";
            }
            token[c] = line.substr(0, pos);
            line.erase(0, pos + delimiter.length());
            c++;
        }
        String head = token[0];
        if (c == 2) {
            String left_terminal = token[1];
            String right_terminal = line;   // remainder after the second delimiter
            auto tail = make_pair(left_terminal, right_terminal);
            // All three symbols of a double production are nonterminals.
            this->nonterminalSet.insert(head);
            this->nonterminalSet.insert(left_terminal);
            this->nonterminalSet.insert(right_terminal);
            if (this->productionsDouble.count(head) == 1) {
                auto iter = this->productionsDouble.find(head);
                iter->second.insert(tail);
            } else {
                this->productionsDouble.insert(make_pair(head, set<pair<String, String >>({tail})));
            }
        } else if (c == 1) {
            const String &terminal = line;
            this->nonterminalSet.insert(head);
            if (this->productionsUnary.count(head) == 1) {
                auto iter = this->productionsUnary.find(head);
                iter->second.insert(terminal);
            } else {
                this->productionsUnary.insert(make_pair(head, set<String>({terminal})));
            }
            this->terminalSet.insert(terminal);
        } else {
            throw "Error while process line from grammar";
        }
    }
    // terminal -> { heads of unary productions that yield it }
    void make_unary_reverse_relation() {
        for (auto nonterminal: this->productionsUnary) {
            for (auto terminal: nonterminal.second) {
                if (reverseProductionsUnary.count(terminal) == 1) {
                    reverseProductionsUnary.find(terminal)->second.insert(nonterminal.first);
                } else {
                    reverseProductionsUnary.insert(make_pair(terminal, set<String>({nonterminal.first})));
                }
            }
        }
    }
    // (left, right) body -> { heads of double productions with that body }
    void make_double_reverse_relation() {
        for (auto head:this->productionsDouble) {
            for (auto elem_pair:head.second) {
                if (reverseProductionsDouble.count(elem_pair) == 1) {
                    reverseProductionsDouble.find(elem_pair)->second.insert(head.first);
                } else {
                    reverseProductionsDouble.insert(make_pair(elem_pair, set<String>({head.first})));
                }
            }
        }
    }
    // For every ordered nonterminal pair that forms some production body,
    // record each partner as a "body neighbour" of the other (symmetric).
    void make_nonTerminalToBodyOfProduction() {
        for (auto leftNonTerminal: nonterminalSet) {
            for (auto rightNonTerminal:nonterminalSet) {
                auto key = make_pair(leftNonTerminal, rightNonTerminal);
                if (reverseProductionsDouble.count(key)) {
                    if (nonTerminalToBodyOfProduction.count(leftNonTerminal)) {
                        nonTerminalToBodyOfProduction.find(leftNonTerminal)->second.insert(rightNonTerminal);
                    } else {
                        nonTerminalToBodyOfProduction.insert(
                                make_pair(leftNonTerminal, set<String>({rightNonTerminal})));
                    }
                    if (nonTerminalToBodyOfProduction.count(rightNonTerminal)) {
                        nonTerminalToBodyOfProduction.find(rightNonTerminal)->second.insert(leftNonTerminal);
                    } else {
                        nonTerminalToBodyOfProduction.insert(
                                make_pair(rightNonTerminal, set<String>({leftNonTerminal})));
                    }
                }
            }
        }
    }
};
// A directed, labelled edge of the input graph.
class Edge {
public:
    int from;           // source vertex id
    set<String> label;  // symbols attached to this edge (terminals, later nonterminals)
    int to;             // destination vertex id

    // Construct an edge between two vertices; the label set starts out empty.
    Edge(int src, int dst) : from(src), to(dst) {}
};
// Directed labelled graph parsed from a file of "from label to" lines.
class Graph {
public:
    vector<Edge> edges;
    int max_number_of_vertex;  // number of vertices (max id seen + 1)
    int multiple_by_32;        // max_number_of_vertex rounded up to a multiple of SQUEEZE (32)

    // Parse the edge list. Throws a const char* message on a malformed line
    // or when the file cannot be opened.
    void parse_graph(const String &filename, const String &delimiter = " ") {
        std::ifstream file(filename);
        int max_vertex = 0;
        if (file.is_open()) {
            std::string line;
            while (getline(file, line)) {
                size_t pos = 0;
                std::string token[2];
                int c = 0;
                while ((pos = line.find(delimiter)) != std::string::npos) {
                    if (c >= 2) {
                        // Guard: more than two delimiters used to write past
                        // token[2] (undefined behavior); reject the line.
                        throw "Error while process line from graph";
                    }
                    token[c] = line.substr(0, pos);
                    line.erase(0, pos + delimiter.length());
                    c++;
                }
                if (c == 2) {
                    int l = std::stoi(token[0]);   // source vertex
                    int r = std::stoi(line);       // destination vertex (text after 2nd delimiter)
                    max_vertex = std::max(std::max(l, r), max_vertex);
                    Edge edge = Edge(l, r);
                    edge.label.insert(token[1]);   // the edge's terminal symbol
                    edges.push_back(edge);
                } else {
                    throw "Error while process line from graph";
                }
            }
            file.close();
        } else{
            throw "Error File not found";
        }
        max_vertex+=1;// vertices are numbered 0..max_vertex inclusive
        max_number_of_vertex = max_vertex;
        // Round the vertex count up to the next multiple of SQUEEZE so a
        // bit-packed row fits a whole number of 32-bit words.
        if (max_vertex % SQUEEZE == 0) {
            multiple_by_32 = max_vertex;
        } else {
            int quout = max_vertex % SQUEEZE;
            multiple_by_32 = max_vertex + SQUEEZE - quout;
        }
    }
    // Replace every terminal on every edge label with the set of nonterminals
    // that produce it (grammar unary productions, reversed). Terminals with no
    // producing nonterminal simply disappear from the label.
    void replace_terminals_to_noterminals(Grammar &grammar) {
        for (auto &edge : edges) {
            set<String> tmp;
            for (const String &key:edge.label) {
                if (grammar.reverseProductionsUnary.count(key) == 1) {
                    tmp.insert(grammar.reverseProductionsUnary.find(key)->second.begin(),
                               grammar.reverseProductionsUnary.find(key)->second.end());
                }
            }
            edge.label.clear();
            edge.label.insert(tmp.begin(), tmp.end());
        }
    }
};
// Allocate a rows x cols matrix of uint32_t in page-locked (pinned) host
// memory and zero-fill it. Zeroing matters: the fill/output code only ever
// writes the cells corresponding to graph edges and later reads every cell,
// and cudaMallocHost does not zero the buffer by itself.
// Aborts with a diagnostic if the allocation fails (previously the error
// code was ignored and an indeterminate pointer could be returned).
uint32_t * allocate_matrix_host(int rows,int cols) {
    uint32_t *matrix = nullptr;
    size_t bytes = sizeof(uint32_t) * (size_t)rows * (size_t)cols;
    cudaError_t err = cudaMallocHost((void **) &matrix, bytes);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMallocHost(%zu bytes) failed: %s\n", bytes, cudaGetErrorString(err));
        abort();
    }
    memset(matrix, 0, bytes);  // callers rely on a zero-initialized matrix
    return matrix;
}
// Allocate a rows x cols matrix of uint32_t in device (GPU) global memory.
// Contents are uninitialized; every caller in this file overwrites the whole
// buffer with cudaMemcpy before the kernel reads it.
// Aborts with a diagnostic if the allocation fails (previously the error
// code was ignored and an indeterminate pointer could be returned).
uint32_t * allocate_matrix_device(int rows,int cols){
    uint32_t *matrix = nullptr;
    size_t bytes = sizeof(uint32_t) * (size_t)rows * (size_t)cols;
    cudaError_t err = cudaMalloc((void **) &matrix, bytes);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMalloc(%zu bytes) failed: %s\n", bytes, cudaGetErrorString(err));
        abort();
    }
    return matrix;
}
// Release a matrix previously obtained from allocate_matrix_device().
// The cudaFree return code is deliberately ignored (best-effort cleanup).
void delete_matrix_device(uint32_t * matrix) {
    cudaFree(matrix);
}
// Release pinned host memory previously obtained from allocate_matrix_host().
// The cudaFreeHost return code is deliberately ignored (best-effort cleanup).
void delete_matrix_host(uint32_t * matrix) {
    cudaFreeHost(matrix);
}
// Boolean-semiring matrix multiply-accumulate: c |= a * b, where element
// "multiplication" is bitwise AND and "addition" is bitwise OR.
//   a is m x n, b is n x k, c is m x k, all row-major, one uint32_t per cell.
// Launch layout: 2D grid of 2D blocks; thread (row, col) owns c[row*k + col].
// *is_changed is set to IS_CHANGED (1) whenever any cell of c gained bits,
// so the host can detect that the closure has not yet reached a fixpoint.
__global__ void gpu_matrix_mult(uint32_t *a,uint32_t *b, uint32_t *c, int m, int n, int k,uint32_t * is_changed)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard against the partial blocks at the grid edge.
    if (col < k && row < m)
    {
        uint32_t old_c = c[row * k + col];
        uint32_t sum = 0;
        for (int i = 0; i < n; i++)
        {
            sum |= a[row * n + i] & b[i * k + col];
        }
        sum |= old_c;   // accumulate: never drop bits already present in c
        if (sum != old_c) {
            // Unconditional store replaces the previous racy
            // "if (*is_changed == NOT_CHANGED)" read-then-write: every writer
            // stores the same value, so the final state is deterministic and
            // the cross-thread stale read of *is_changed is avoided.
            *is_changed = IS_CHANGED;
        }
        c[row * k + col] = sum;
    }
}
// Per-nonterminal adjacency matrix plus its transfer/bookkeeping buffers.
struct Matrix {
    uint32_t *matrix_host;           // dense rows x cols matrix, pinned host memory (allocate_matrix_host)
    uint32_t *matrix_device;         // device-side copy — NOTE(review): never assigned by the active code path
    uint32_t *matrix_squeezed_host;  // bit-packed variant — NOTE(review): only referenced from commented-out code
    uint32_t *is_changed_host;       // single-cell flag: IS_CHANGED (1) / NOT_CHANGED (0)
};
// Pair of iteration tables. NOTE(review): declared and held by Solution but
// never read or written by the current code path; purpose unclear from here.
struct Table {
    uint32_t *table_n;     // presumably the current-iteration table — TODO confirm
    uint32_t *table_last;  // presumably the previous-iteration table — TODO confirm
};
// Drives the CFPQ (context-free path querying) computation: builds one
// boolean adjacency matrix per nonterminal and closes them under the
// grammar's double productions by repeated boolean matrix multiplication
// (performed on the GPU) until a fixpoint is reached.
class Solution {
public:
    Graph graph;
    Grammar grammar;
    map<String, Matrix> nonTerminalToMatrix;  // one matrix per nonterminal
    uint32_t * extra_matrix;                  // scratch buffer for multiplies where head aliases a body matrix
    Table table;                              // NOTE(review): never used by the current code path
    // Parse grammar and graph, rewrite edge labels from terminals to
    // nonterminals, then allocate and seed the per-nonterminal matrices.
    Solution(const String &filename_grammar, const String &filename_graph, const String &delimiter = " ") {
        // add table size as parameter
        graph.parse_graph(filename_graph, delimiter);
        grammar.parse_grammar(filename_grammar, delimiter);
        graph.replace_terminals_to_noterminals(grammar);
        construct_and_fill_matrices_for_nonterminal_test();
    }
    // Fixpoint loop: starting from the matrices that already have bits set,
    // keep multiplying the matrix pairs of every production whose body
    // mentions a changed nonterminal, until one whole pass changes nothing.
    void compute_result() {
        // initial setup: seed the worklist with every nonterminal whose
        // matrix was marked changed during construction
        set<String> changed_matrices = set<String>();
        for (auto &elem: nonTerminalToMatrix) {
            if (*elem.second.is_changed_host == IS_CHANGED) {
                changed_matrices.insert(elem.first);
            }
        }
        if (changed_matrices.empty()) {
            return;//
        }
        while (true) {
            set<String> new_changed_matrices = set<String>();
            for (auto &nonterminal: changed_matrices) {
                if (grammar.nonTerminalToBodyOfProduction.count(nonterminal)) {
                    auto const &possibly_second_key_set = grammar.nonTerminalToBodyOfProduction.find(
                            nonterminal)->second;
                    // multiply every matrix pair whose production body contains
                    // this nonterminal, in either position of the body
                    for (const auto &sec: possibly_second_key_set) {
                        auto key1 = make_pair(nonterminal, sec);
                        auto key2 = make_pair(sec, nonterminal);
                        if (grammar.reverseProductionsDouble.count(key1)) {
                            auto iter = grammar.reverseProductionsDouble.find(key1);
                            for (const auto &res: iter->second) {
                                auto is_changed = perform_matrix_mul(res, iter->first.first, iter->first.second);
                                if (is_changed) {
                                    new_changed_matrices.insert(res);
                                }
                            }
                        }
                        if (grammar.reverseProductionsDouble.count(key2)) {
                            auto iter = grammar.reverseProductionsDouble.find(key2);
                            for (const auto &res: iter->second) {
                                auto is_changed = perform_matrix_mul(res, iter->first.first, iter->first.second);
                                if (is_changed) {
                                    new_changed_matrices.insert(res);
                                }
                            }
                        }
                    }
                }
            }
            if (new_changed_matrices.empty()) {
                //copy
                break;
            } else {
                changed_matrices = new_changed_matrices;
                //update matrices
            }
            //transfer
        }
    }
private:
    // reminder (translated): remember to set the changed flag for matrices
    // that contain nonzeros
    // void construct_and_fill_matrices_for_nonterminals() {
    //     int rows = this->graph.multiple_by_32;
    //     int cols = this->graph.multiple_by_32 / SQUEEZE; // compressed along rows
    //     for (auto nonterminal: grammar.nonterminalSet) {
    //         Matrix matrix = Matrix();
    //         matrix.matrix_host = alloc_matrix_host_with_zeros(rows, cols);
    //         matrix.is_changed_host = alloc_matrix_host_with_zeros(1, 1);
    //         this->nonTerminalToMatrix.insert(make_pair(nonterminal, matrix));
    //         matrix.matrix_device = alloc_matrix_device_with_zeros(rows, cols);// on the GPU
    //     }// host matrices zero-filled
    //     for (auto &edge:graph.edges) {
    //         auto i = edge.from;
    //         auto j = edge.to;
    //         for (const auto &nonterminal:edge.label) { // set bit (i,j) in every matrix whose nonterminal labels this edge
    //             fill_squeezed_matrix(this->nonTerminalToMatrix.find(nonterminal)->second.matrix_host, i, j,
    //                                  graph.multiple_by_32);
    //         }
    //     }
    //     for (const auto &nonterminal: grammar.nonterminalSet) {// transfer data from CPU to GPU
    //         auto &matrix = this->nonTerminalToMatrix.find(nonterminal)->second;
    //         transfer_matrix_from_host_to_gpu(matrix.matrix_host, matrix.matrix_device, rows, cols);
    //     }
    // }
    // Allocate one dense host matrix per nonterminal and write a 1 at (i, j)
    // for every edge i -> j labelled with that nonterminal; the changed flag
    // is set for any matrix that received at least one 1.
    // NOTE(review): the comment below claims the matrices start zero-filled,
    // but allocate_matrix_host (cudaMallocHost) does not zero its buffer —
    // cells never written here are read uninitialized when main() prints the
    // result; verify.
    void construct_and_fill_matrices_for_nonterminal_test() {
        int rows = this->graph.max_number_of_vertex;
        int cols = this->graph.max_number_of_vertex;
        int squeezed_cols = this->graph.multiple_by_32;  // NOTE(review): unused outside commented-out code
        for (auto nonterminal: grammar.nonterminalSet) {
            Matrix matrix = Matrix();
            matrix.matrix_host = allocate_matrix_host(rows,cols); //alloc_matrix_host_with_zeros(rows, cols);
            // matrix.matrix_squeezed_host = new uint32_t[rows*squeezed_cols];
            matrix.is_changed_host = allocate_matrix_host(1,1);
            *matrix.is_changed_host = NOT_CHANGED;
            this->nonTerminalToMatrix.insert(make_pair(nonterminal, matrix));
        }// host matrices prepared ("zero-filled" per the original comment — see NOTE above)
        extra_matrix = allocate_matrix_host(cols,rows); // allocate the scratch matrix
        for (auto &edge:graph.edges) {
            auto i = edge.from;
            auto j = edge.to;
            for (const auto &nonterminal:edge.label) { // set 1 at (i,j) in every matrix whose nonterminal labels edge i -> j
                auto &matrix = this->nonTerminalToMatrix.find(nonterminal)->second;
                matrix.matrix_host[i * cols + j] = 1;
                //write_bit(matrix.matrix_squeezed_host,i,j,squeezed_cols);
                if (*matrix.is_changed_host == NOT_CHANGED) {
                    *matrix.is_changed_host = IS_CHANGED;
                }
            }
        }
    }
    // Set bit (i, j) in a bit-packed matrix with `cols` 32-bit words per row.
    // Bit 31 of a word is column 0 of that word (big-endian bit order).
    void write_bit(uint32_t *m, int i, int j,int cols){
        // m[i * cols + (j / 32)] |= (1ULL << (31 - (j % 32)));
        m[i * cols + (j / 32)] |= (1 << (31 - (j % 32)));
    }
    // Same bit-set operation, expressed via the padded row width size32
    // (a multiple of SQUEEZE); only called from commented-out code.
    inline void fill_squeezed_matrix(uint32_t *matrix, int i, int j, int size32) {
        // row layout is fine
        int cols = size32 / 32;
        int position_in_number32 = (SQUEEZE - 1) - (j % SQUEEZE);
        int position_in_squezzed_row = j / 32;
        matrix[i * cols + position_in_squezzed_row] |= (1L << position_in_number32);
    }
    // uint32_t *alloc_matrix_host_with_zeros(int rows, int cols) {
    // }
    // uint32_t *alloc_matrix_device_with_zeros(int rows, int cols) {
    // }
    // Placeholder: intended host -> device transfer, currently a no-op
    // (gpu_version does its own cudaMemcpy calls instead).
    void transfer_matrix_from_host_to_gpu(uint32_t *host, uint32_t *device, int rows, int cols) {
        //
    }
    // Placeholder: intended device -> host transfer, currently a no-op.
    void transfer_matrix_from_gpu_to_host(uint32_t *device, uint32_t *host, int rows, int cols) {
    }
    // One GPU multiply-accumulate c |= a * b for square n x n matrices:
    // allocates device buffers, copies a, b, c and the flag in, launches
    // gpu_matrix_mult on a BLOCK_SIZE x BLOCK_SIZE thread grid, copies c and
    // the flag back, and frees the device buffers. All CUDA return codes are
    // ignored here (best-effort).
    void gpu_version(const uint32_t *a, const uint32_t *b, uint32_t *c, int n, uint32_t *is_changed){
        // c += ab
        uint32_t * a_d = allocate_matrix_device(n,n);
        uint32_t * b_d = allocate_matrix_device(n,n);
        uint32_t * c_d = allocate_matrix_device(n,n);
        uint32_t * flag_device = allocate_matrix_device(1,1);
        cudaMemcpy( a_d,a, sizeof(uint32_t)*n*n, cudaMemcpyHostToDevice);
        cudaMemcpy( b_d,b, sizeof(uint32_t)*n*n, cudaMemcpyHostToDevice);
        cudaMemcpy( c_d,c, sizeof(uint32_t)*n*n, cudaMemcpyHostToDevice);
        cudaMemcpy( flag_device,is_changed, sizeof(uint32_t), cudaMemcpyHostToDevice);
        cudaDeviceSynchronize();
        dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
        dim3 dimGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE, (n + BLOCK_SIZE - 1) / BLOCK_SIZE);
        gpu_matrix_mult<<<dimGrid,dimBlock>>>(a_d,b_d, c_d, n, n, n,flag_device);
        cudaDeviceSynchronize();
        cudaMemcpy( c,c_d, sizeof(uint32_t)*n*n, cudaMemcpyDeviceToHost);
        cudaMemcpy( is_changed,flag_device, sizeof(uint32_t), cudaMemcpyDeviceToHost);
        cudaDeviceSynchronize();
        delete_matrix_device(a_d);
        delete_matrix_device(b_d);
        delete_matrix_device(c_d);
        delete_matrix_device(flag_device);
    }
    // Boolean-semiring c |= a * b; validates the shape, then delegates to the
    // GPU path. NOTE(review): gpu_version is passed row_a as the single size
    // n, so this implicitly assumes square matrices (row_a == col_a == col_b).
    void dummy_subring_matrix_mul(const uint32_t *a, int row_a, int col_a, const uint32_t *b, int row_b, int col_b,
                                  uint32_t *c, uint32_t *is_changed) {
        if (col_a != row_b) {
            printf("The matrices can't be multiplied with each other.\n");
            return;
        }
        gpu_version(a,b,c,row_a,is_changed);
        //
        // for (int i = 0; i < row_a; i++) {
        //
        //     for (int j = 0; j < col_b; j++) {
        //         uint32_t value = 0;
        //
        //         for (int k = 0; k < row_b; k++) {
        //             value |= a[i * row_b + k] & b[k * col_b + j];
        //         }
        //         if (*is_changed == NOT_CHANGED && (c[i * col_b + j] | value) != c[i * col_b + j]) {
        //             *is_changed = IS_CHANGED;
        //         }
        //         c[i * col_b + j] |= value;
        //     }
        // }
    }
    // perform algo
    //
    // allocate matrices and tables on device
    //
    // A = C*B
    // Execute A |= C * B for the production head -> left right and report
    // (via the returned flag) whether A's matrix gained any bits. When head
    // aliases an operand, the operand is first copied into extra_matrix so
    // the multiply does not read A while writing it.
    // NOTE(review): when head == left and head != right, BOTH the aliased
    // multiply and the else-branch multiply run (the first `if` is not
    // chained with `else`); likely a missing else-if — verify intended flow.
    int perform_matrix_mul(const String &head, const String &left, const String &right) {
        int rows = graph.max_number_of_vertex;
        int cols = graph.max_number_of_vertex;
        auto &A = this->nonTerminalToMatrix.at(head);
        auto &C = this->nonTerminalToMatrix.at(left);
        auto &B = this->nonTerminalToMatrix.at(right);
        *A.is_changed_host = 0;
        if (head == left) {// scratch copy needed because A aliases C
            copy(C.matrix_host, C.matrix_host + rows * cols, extra_matrix);
            dummy_subring_matrix_mul(extra_matrix, rows, cols, B.matrix_host, rows, cols, A.matrix_host,
                                     A.is_changed_host);
        }
        if (head == right) {// scratch copy needed because A aliases B
            copy(B.matrix_host, B.matrix_host + rows * cols, extra_matrix);
            dummy_subring_matrix_mul(C.matrix_host, rows, cols, extra_matrix, rows, cols, A.matrix_host,
                                     A.is_changed_host);
        } else {
            dummy_subring_matrix_mul(C.matrix_host, rows, cols, B.matrix_host, rows, cols, A.matrix_host,
                                     A.is_changed_host);
        }
        return *A.is_changed_host;
    }
};
// Entry point.
//   argv[1] = grammar file, argv[2] = graph file, argv[3] = output file.
// Runs the CFPQ closure, writes for every nonterminal (sorted) the (i, j)
// vertex pairs whose matrix cell is nonzero, and prints the elapsed seconds.
int main(int argc, char* argv[]) {
    // Guard: argv[1..3] were previously dereferenced unconditionally.
    if (argc < 4) {
        cerr << "usage: " << (argc > 0 ? argv[0] : "cfpq")
             << " <grammar_file> <graph_file> <output_file>" << endl;
        return 1;
    }
    auto solution = Solution(argv[1], argv[2], DELIMITR);
    clock_t begin = clock();
    solution.compute_result();
    clock_t end = clock();
    double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
    // (removed: a dead `ifstream input(argv[3])` that opened the output file
    // for reading and was never used)
    vector<String > res(solution.grammar.nonterminalSet.begin(),solution.grammar.nonterminalSet.end());
    sort(res.begin(),res.end());
    ofstream outputfile;
    outputfile.open(argv[3]);
    int n = solution.graph.max_number_of_vertex;  // hoisted loop-invariant bound
    for (auto &nonterminal: res) {
        auto &matrix = solution.nonTerminalToMatrix.at(nonterminal);
        outputfile << nonterminal;
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < n; j++) {
                if (matrix.matrix_host[i * n + j] != 0) {
                    outputfile << " " << i << " " << j;
                }
            }
        }
        outputfile << endl;
    }
    outputfile.close();
    cout<<elapsed_secs<<endl;
}
|
code for sm_80
Function : _Z15gpu_matrix_multPjS_S_iiiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e280000002100 */
/*0030*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e680000002600 */
/*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0205 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x002fca00078e0202 */
/*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x178], P0 ; /* 0x00005e0003007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff067624 */
/* 0x000fe200078e00ff */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe400078e00ff */
/*00d0*/ IMAD R12, R3, c[0x0][0x180], R0 ; /* 0x00006000030c7a24 */
/* 0x000fe200078e0200 */
/*00e0*/ ISETP.GE.AND P0, PT, R6, 0x1, PT ; /* 0x000000010600780c */
/* 0x000fc60003f06270 */
/*00f0*/ IMAD.WIDE R12, R12, R5, c[0x0][0x170] ; /* 0x00005c000c0c7625 */
/* 0x000fca00078e0205 */
/*0100*/ LDG.E R2, [R12.64] ; /* 0x000000040c027981 */
/* 0x000162000c1e1900 */
/*0110*/ IMAD.MOV.U32 R23, RZ, RZ, RZ ; /* 0x000000ffff177224 */
/* 0x000fc800078e00ff */
/*0120*/ @!P0 BRA 0xc50 ; /* 0x00000b2000008947 */
/* 0x000fea0003800000 */
/*0130*/ IADD3 R4, R6.reuse, -0x1, RZ ; /* 0xffffffff06047810 */
/* 0x040fe20007ffe0ff */
/*0140*/ HFMA2.MMA R23, -RZ, RZ, 0, 0 ; /* 0x00000000ff177435 */
/* 0x000fe200000001ff */
/*0150*/ LOP3.LUT R6, R6, 0x3, RZ, 0xc0, !PT ; /* 0x0000000306067812 */
/* 0x000fe400078ec0ff */
/*0160*/ ISETP.GE.U32.AND P0, PT, R4, 0x3, PT ; /* 0x000000030400780c */
/* 0x000fe20003f06070 */
/*0170*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fd800078e00ff */
/*0180*/ @!P0 BRA 0xb50 ; /* 0x000009c000008947 */
/* 0x000fea0003800000 */
/*0190*/ IADD3 R9, -R6, c[0x0][0x17c], RZ ; /* 0x00005f0006097a10 */
/* 0x000fe20007ffe1ff */
/*01a0*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */
/* 0x000fe20000000a00 */
/*01b0*/ IMAD R7, R3, c[0x0][0x17c], RZ ; /* 0x00005f0003077a24 */
/* 0x000fe400078e02ff */
/*01c0*/ ISETP.GT.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fe20003f04270 */
/*01d0*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fe400078e00ff */
/*01e0*/ IMAD.WIDE R16, R0, R5, c[0x0][0x168] ; /* 0x00005a0000107625 */
/* 0x000fd400078e0205 */
/*01f0*/ @!P0 BRA 0x9b0 ; /* 0x000007b000008947 */
/* 0x000fea0003800000 */
/*0200*/ ISETP.GT.AND P1, PT, R9, 0xc, PT ; /* 0x0000000c0900780c */
/* 0x000fe40003f24270 */
/*0210*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*0220*/ @!P1 BRA 0x6e0 ; /* 0x000004b000009947 */
/* 0x000fea0003800000 */
/*0230*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0240*/ IMAD.U32 R14, RZ, RZ, UR6 ; /* 0x00000006ff0e7e24 */
/* 0x000fe2000f8e00ff */
/*0250*/ LDG.E R22, [R16.64] ; /* 0x0000000410167981 */
/* 0x0002a2000c1e1900 */
/*0260*/ IMAD.U32 R15, RZ, RZ, UR7 ; /* 0x00000007ff0f7e24 */
/* 0x000fe4000f8e00ff */
/*0270*/ IMAD.MOV.U32 R8, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff087624 */
/* 0x000fe400078e00ff */
/*0280*/ IMAD.WIDE R14, R7, 0x4, R14 ; /* 0x00000004070e7825 */
/* 0x000fc800078e020e */
/*0290*/ IMAD.WIDE R18, R8.reuse, 0x4, R16 ; /* 0x0000000408127825 */
/* 0x040fe200078e0210 */
/*02a0*/ LDG.E R28, [R14.64] ; /* 0x000000040e1c7981 */
/* 0x000ea8000c1e1900 */
/*02b0*/ LDG.E R10, [R18.64] ; /* 0x00000004120a7981 */
/* 0x000722000c1e1900 */
/*02c0*/ IMAD.WIDE R16, R8, 0x4, R18 ; /* 0x0000000408107825 */
/* 0x002fc600078e0212 */
/*02d0*/ LDG.E R11, [R14.64+0x4] ; /* 0x000004040e0b7981 */
/* 0x000f28000c1e1900 */
/*02e0*/ LDG.E R24, [R16.64] ; /* 0x0000000410187981 */
/* 0x000322000c1e1900 */
/*02f0*/ IMAD.WIDE R18, R8, 0x4, R16 ; /* 0x0000000408127825 */
/* 0x008fc600078e0210 */
/*0300*/ LDG.E R25, [R14.64+0x8] ; /* 0x000008040e197981 */
/* 0x000ee8000c1e1900 */
/*0310*/ LDG.E R27, [R14.64+0xc] ; /* 0x00000c040e1b7981 */
/* 0x000ee2000c1e1900 */
/*0320*/ IMAD.WIDE R20, R8, 0x4, R18 ; /* 0x0000000408147825 */
/* 0x000fc600078e0212 */
/*0330*/ LDG.E R26, [R18.64] ; /* 0x00000004121a7981 */
/* 0x0008e8000c1e1900 */
/*0340*/ LDG.E R29, [R14.64+0x10] ; /* 0x000010040e1d7981 */
/* 0x000ee8000c1e1900 */
/*0350*/ LDG.E R16, [R20.64] ; /* 0x0000000414107981 */
/* 0x0026e2000c1e1900 */
/*0360*/ LOP3.LUT R28, R23, R22, R28, 0xf8, !PT ; /* 0x00000016171c7212 */
/* 0x004fe200078ef81c */
/*0370*/ IMAD.WIDE R22, R8, 0x4, R20 ; /* 0x0000000408167825 */
/* 0x000fca00078e0214 */
/*0380*/ LDG.E R17, [R22.64] ; /* 0x0000000416117981 */
/* 0x0002a2000c1e1900 */
/*0390*/ LOP3.LUT R18, R28, R10, R11, 0xf8, !PT ; /* 0x0000000a1c127212 */
/* 0x010fe200078ef80b */
/*03a0*/ IMAD.WIDE R10, R8, 0x4, R22 ; /* 0x00000004080a7825 */
/* 0x000fe400078e0216 */
/*03b0*/ LDG.E R28, [R14.64+0x14] ; /* 0x000014040e1c7981 */
/* 0x000ea2000c1e1900 */
/*03c0*/ LOP3.LUT R20, R18, R24, R25, 0xf8, !PT ; /* 0x0000001812147212 */
/* 0x008fc600078ef819 */
/*03d0*/ IMAD.WIDE R18, R8, 0x4, R10 ; /* 0x0000000408127825 */
/* 0x000fe200078e020a */
/*03e0*/ LDG.E R25, [R14.64+0x18] ; /* 0x000018040e197981 */
/* 0x000ee8000c1e1900 */
/*03f0*/ LDG.E R24, [R10.64] ; /* 0x000000040a187981 */
/* 0x0008e4000c1e1900 */
/*0400*/ LOP3.LUT R10, R20, R26, R27, 0xf8, !PT ; /* 0x0000001a140a7212 */
/* 0x010fe200078ef81b */
/*0410*/ IMAD.WIDE R20, R8, 0x4, R18 ; /* 0x0000000408147825 */
/* 0x000fe200078e0212 */
/*0420*/ LDG.E R27, [R14.64+0x1c] ; /* 0x00001c040e1b7981 */
/* 0x000f28000c1e1900 */
/*0430*/ LDG.E R26, [R18.64] ; /* 0x00000004121a7981 */
/* 0x000122000c1e1900 */
/*0440*/ LOP3.LUT R16, R10, R16, R29, 0xf8, !PT ; /* 0x000000100a107212 */
/* 0x000fe200078ef81d */
/*0450*/ IMAD.WIDE R22, R8, 0x4, R20 ; /* 0x0000000408167825 */
/* 0x002fc400078e0214 */
/*0460*/ LDG.E R29, [R20.64] ; /* 0x00000004141d7981 */
/* 0x000328000c1e1900 */
/*0470*/ LDG.E R18, [R14.64+0x20] ; /* 0x000020040e127981 */
/* 0x001f28000c1e1900 */
/*0480*/ LDG.E R19, [R14.64+0x24] ; /* 0x000024040e137981 */
/* 0x000f28000c1e1900 */
/*0490*/ LDG.E R20, [R22.64] ; /* 0x0000000416147981 */
/* 0x002122000c1e1900 */
/*04a0*/ IMAD.WIDE R10, R8, 0x4, R22 ; /* 0x00000004080a7825 */
/* 0x000fc600078e0216 */
/*04b0*/ LDG.E R21, [R14.64+0x28] ; /* 0x000028040e157981 */
/* 0x000f22000c1e1900 */
/*04c0*/ LOP3.LUT R16, R16, R17, R28, 0xf8, !PT ; /* 0x0000001110107212 */
/* 0x004fc800078ef81c */
/*04d0*/ LOP3.LUT R24, R16, R24, R25, 0xf8, !PT ; /* 0x0000001810187212 */
/* 0x008fe200078ef819 */
/*04e0*/ IMAD.WIDE R16, R8, 0x4, R10 ; /* 0x0000000408107825 */
/* 0x000fe400078e020a */
/*04f0*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x0002a2000c1e1900 */
/*0500*/ LOP3.LUT R26, R24, R26, R27, 0xf8, !PT ; /* 0x0000001a181a7212 */
/* 0x010fc600078ef81b */
/*0510*/ IMAD.WIDE R24, R8.reuse, 0x4, R16 ; /* 0x0000000408187825 */
/* 0x040fe400078e0210 */
/*0520*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000728000c1e1900 */
/*0530*/ IMAD.WIDE R22, R8, 0x4, R24 ; /* 0x0000000408167825 */
/* 0x001fe400078e0218 */
/*0540*/ LDG.E R24, [R24.64] ; /* 0x0000000418187981 */
/* 0x000f22000c1e1900 */
/*0550*/ LOP3.LUT R18, R26, R29, R18, 0xf8, !PT ; /* 0x0000001d1a127212 */
/* 0x000fc600078ef812 */
/*0560*/ LDG.E R29, [R14.64+0x2c] ; /* 0x00002c040e1d7981 */
/* 0x000f28000c1e1900 */
/*0570*/ LDG.E R17, [R14.64+0x34] ; /* 0x000034040e117981 */
/* 0x008ee2000c1e1900 */
/*0580*/ LOP3.LUT R28, R18, R20, R19, 0xf8, !PT ; /* 0x00000014121c7212 */
/* 0x000fe200078ef813 */
/*0590*/ IMAD.WIDE R18, R8.reuse, 0x4, R22 ; /* 0x0000000408127825 */
/* 0x040fe400078e0216 */
/*05a0*/ LDG.E R20, [R14.64+0x30] ; /* 0x000030040e147981 */
/* 0x000ee8000c1e1900 */
/*05b0*/ LDG.E R22, [R22.64] ; /* 0x0000000416167981 */
/* 0x0000e2000c1e1900 */
/*05c0*/ IMAD.WIDE R26, R8, 0x4, R18 ; /* 0x00000004081a7825 */
/* 0x000fc600078e0212 */
/*05d0*/ LDG.E R11, [R18.64] ; /* 0x00000004120b7981 */
/* 0x0022e8000c1e1900 */
/*05e0*/ LDG.E R23, [R14.64+0x3c] ; /* 0x00003c040e177981 */
/* 0x001ee8000c1e1900 */
/*05f0*/ LDG.E R18, [R14.64+0x38] ; /* 0x000038040e127981 */
/* 0x002ee8000c1e1900 */
/*0600*/ LDG.E R19, [R26.64] ; /* 0x000000041a137981 */
/* 0x000ee2000c1e1900 */
/*0610*/ IADD3 R9, R9, -0x10, RZ ; /* 0xfffffff009097810 */
/* 0x000fc80007ffe0ff */
/*0620*/ ISETP.GT.AND P1, PT, R9, 0xc, PT ; /* 0x0000000c0900780c */
/* 0x000fe20003f24270 */
/*0630*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */
/* 0x000fe2000ff1e03f */
/*0640*/ IADD3 R4, R4, 0x10, RZ ; /* 0x0000001004047810 */
/* 0x000fc60007ffe0ff */
/*0650*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0660*/ LOP3.LUT R10, R28, R10, R21, 0xf8, !PT ; /* 0x0000000a1c0a7212 */
/* 0x004fc800078ef815 */
/*0670*/ LOP3.LUT R29, R10, R16, R29, 0xf8, !PT ; /* 0x000000100a1d7212 */
/* 0x010fc800078ef81d */
/*0680*/ LOP3.LUT R20, R29, R24, R20, 0xf8, !PT ; /* 0x000000181d147212 */
/* 0x008fc800078ef814 */
/*0690*/ LOP3.LUT R17, R20, R22, R17, 0xf8, !PT ; /* 0x0000001614117212 */
/* 0x000fc800078ef811 */
/*06a0*/ LOP3.LUT R18, R17, R11, R18, 0xf8, !PT ; /* 0x0000000b11127212 */
/* 0x000fe200078ef812 */
/*06b0*/ IMAD.WIDE R16, R8, 0x4, R26 ; /* 0x0000000408107825 */
/* 0x000fc600078e021a */
/*06c0*/ LOP3.LUT R23, R18, R19, R23, 0xf8, !PT ; /* 0x0000001312177212 */
/* 0x000fe200078ef817 */
/*06d0*/ @P1 BRA 0x240 ; /* 0xfffffb6000001947 */
/* 0x000fea000383ffff */
/*06e0*/ ISETP.GT.AND P1, PT, R9, 0x4, PT ; /* 0x000000040900780c */
/* 0x000fda0003f24270 */
/*06f0*/ @!P1 BRA 0x990 ; /* 0x0000029000009947 */
/* 0x000fea0003800000 */
/*0700*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */
/* 0x000fe20008000f00 */
/*0710*/ IMAD.U32 R11, RZ, RZ, UR7 ; /* 0x00000007ff0b7e24 */
/* 0x000fe2000f8e00ff */
/*0720*/ LDG.E R24, [R16.64] ; /* 0x0000000410187981 */
/* 0x0002a2000c1e1900 */
/*0730*/ IMAD.MOV.U32 R25, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff197624 */
/* 0x000fe400078e00ff */
/*0740*/ IMAD.WIDE R10, R7, 0x4, R10 ; /* 0x00000004070a7825 */
/* 0x000fc800078e020a */
/*0750*/ IMAD.WIDE R26, R25.reuse, 0x4, R16 ; /* 0x00000004191a7825 */
/* 0x040fe200078e0210 */
/*0760*/ LDG.E R8, [R10.64+0x4] ; /* 0x000004040a087981 */
/* 0x000ee8000c1e1900 */
/*0770*/ LDG.E R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x002ea8000c1e1900 */
/*0780*/ LDG.E R22, [R26.64] ; /* 0x000000041a167981 */
/* 0x0002e2000c1e1900 */
/*0790*/ IMAD.WIDE R18, R25, 0x4, R26 ; /* 0x0000000419127825 */
/* 0x000fc600078e021a */
/*07a0*/ LDG.E R29, [R10.64+0x8] ; /* 0x000008040a1d7981 */
/* 0x000f26000c1e1900 */
/*07b0*/ IMAD.WIDE R20, R25.reuse, 0x4, R18 ; /* 0x0000000419147825 */
/* 0x040fe400078e0212 */
/*07c0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000128000c1e1900 */
/*07d0*/ IMAD.WIDE R14, R25, 0x4, R20 ; /* 0x00000004190e7825 */
/* 0x000fe200078e0214 */
/*07e0*/ LDG.E R28, [R20.64] ; /* 0x00000004141c7981 */
/* 0x000128000c1e1900 */
/*07f0*/ LDG.E R27, [R10.64+0x10] ; /* 0x000010040a1b7981 */
/* 0x002f28000c1e1900 */
/*0800*/ LDG.E R26, [R10.64+0x14] ; /* 0x000014040a1a7981 */
/* 0x000f22000c1e1900 */
/*0810*/ LOP3.LUT R23, R23, R24, R16, 0xf8, !PT ; /* 0x0000001817177212 */
/* 0x004fe200078ef810 */
/*0820*/ IMAD.WIDE R16, R25, 0x4, R14 ; /* 0x0000000419107825 */
/* 0x000fc400078e020e */
/*0830*/ LDG.E R24, [R10.64+0xc] ; /* 0x00000c040a187981 */
/* 0x000ea2000c1e1900 */
/*0840*/ LOP3.LUT R8, R23, R22, R8, 0xf8, !PT ; /* 0x0000001617087212 */
/* 0x008fc600078ef808 */
/*0850*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ee2000c1e1900 */
/*0860*/ IMAD.WIDE R22, R25, 0x4, R16 ; /* 0x0000000419167825 */
/* 0x000fc600078e0210 */
/*0870*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x0002e6000c1e1900 */
/*0880*/ IMAD.WIDE R20, R25, 0x4, R22 ; /* 0x0000000419147825 */
/* 0x001fe200078e0216 */
/*0890*/ LDG.E R19, [R22.64] ; /* 0x0000000416137981 */
/* 0x0000e8000c1e1900 */
/*08a0*/ LDG.E R16, [R10.64+0x18] ; /* 0x000018040a107981 */
/* 0x002ee8000c1e1900 */
/*08b0*/ LDG.E R23, [R10.64+0x1c] ; /* 0x00001c040a177981 */
/* 0x001ee8000c1e1900 */
/*08c0*/ LDG.E R22, [R20.64] ; /* 0x0000000414167981 */
/* 0x000ee2000c1e1900 */
/*08d0*/ LOP3.LUT R29, R8, R18, R29, 0xf8, !PT ; /* 0x00000012081d7212 */
/* 0x010fe200078ef81d */
/*08e0*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */
/* 0x000fe2000ff1e03f */
/*08f0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fc40003f0e170 */
/*0900*/ IADD3 R4, R4, 0x8, RZ ; /* 0x0000000804047810 */
/* 0x000fe40007ffe0ff */
/*0910*/ IADD3 R9, R9, -0x8, RZ ; /* 0xfffffff809097810 */
/* 0x000fe20007ffe0ff */
/*0920*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0930*/ LOP3.LUT R24, R29, R28, R24, 0xf8, !PT ; /* 0x0000001c1d187212 */
/* 0x004fc800078ef818 */
/*0940*/ LOP3.LUT R14, R24, R14, R27, 0xf8, !PT ; /* 0x0000000e180e7212 */
/* 0x008fc800078ef81b */
/*0950*/ LOP3.LUT R14, R14, R17, R26, 0xf8, !PT ; /* 0x000000110e0e7212 */
/* 0x000fc800078ef81a */
/*0960*/ LOP3.LUT R14, R14, R19, R16, 0xf8, !PT ; /* 0x000000130e0e7212 */
/* 0x000fe200078ef810 */
/*0970*/ IMAD.WIDE R16, R25, 0x4, R20 ; /* 0x0000000419107825 */
/* 0x000fc600078e0214 */
/*0980*/ LOP3.LUT R23, R14, R22, R23, 0xf8, !PT ; /* 0x000000160e177212 */
/* 0x000fe400078ef817 */
/*0990*/ ISETP.NE.OR P0, PT, R9, RZ, P0 ; /* 0x000000ff0900720c */
/* 0x000fda0000705670 */
/*09a0*/ @!P0 BRA 0xb50 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*09b0*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */
/* 0x000fe20008000f00 */
/*09c0*/ IMAD.U32 R10, RZ, RZ, UR6 ; /* 0x00000006ff0a7e24 */
/* 0x000fe4000f8e00ff */
/*09d0*/ IMAD.MOV.U32 R25, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff197624 */
/* 0x000fe400078e00ff */
/*09e0*/ IMAD.WIDE R10, R7, 0x4, R10 ; /* 0x00000004070a7825 */
/* 0x000fc800078e020a */
/*09f0*/ IMAD.WIDE R14, R25.reuse, 0x4, R16 ; /* 0x00000004190e7825 */
/* 0x040fe200078e0210 */
/*0a00*/ LDG.E R8, [R10.64] ; /* 0x000000040a087981 */
/* 0x000ea8000c1e1900 */
/*0a10*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000ea2000c1e1900 */
/*0a20*/ IMAD.WIDE R18, R25, 0x4, R14 ; /* 0x0000000419127825 */
/* 0x000fc600078e020e */
/*0a30*/ LDG.E R15, [R14.64] ; /* 0x000000040e0f7981 */
/* 0x000ee8000c1e1900 */
/*0a40*/ LDG.E R22, [R10.64+0x4] ; /* 0x000004040a167981 */
/* 0x000ee2000c1e1900 */
/*0a50*/ IMAD.WIDE R20, R25, 0x4, R18 ; /* 0x0000000419147825 */
/* 0x000fc600078e0212 */
/*0a60*/ LDG.E R27, [R18.64] ; /* 0x00000004121b7981 */
/* 0x000f28000c1e1900 */
/*0a70*/ LDG.E R24, [R10.64+0x8] ; /* 0x000008040a187981 */
/* 0x000f28000c1e1900 */
/*0a80*/ LDG.E R26, [R10.64+0xc] ; /* 0x00000c040a1a7981 */
/* 0x000f28000c1e1900 */
/*0a90*/ LDG.E R29, [R20.64] ; /* 0x00000004141d7981 */
/* 0x000f22000c1e1900 */
/*0aa0*/ IADD3 R9, R9, -0x4, RZ ; /* 0xfffffffc09097810 */
/* 0x000fc80007ffe0ff */
/*0ab0*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fe20003f05270 */
/*0ac0*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */
/* 0x000fe2000ff1e03f */
/*0ad0*/ IADD3 R4, R4, 0x4, RZ ; /* 0x0000000404047810 */
/* 0x000fc60007ffe0ff */
/*0ae0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0af0*/ LOP3.LUT R8, R23, R16, R8, 0xf8, !PT ; /* 0x0000001017087212 */
/* 0x004fc800078ef808 */
/*0b00*/ LOP3.LUT R8, R8, R15, R22, 0xf8, !PT ; /* 0x0000000f08087212 */
/* 0x008fe200078ef816 */
/*0b10*/ IMAD.WIDE R16, R25, 0x4, R20 ; /* 0x0000000419107825 */
/* 0x000fc600078e0214 */
/*0b20*/ LOP3.LUT R8, R8, R27, R24, 0xf8, !PT ; /* 0x0000001b08087212 */
/* 0x010fc800078ef818 */
/*0b30*/ LOP3.LUT R23, R8, R29, R26, 0xf8, !PT ; /* 0x0000001d08177212 */
/* 0x000fe200078ef81a */
/*0b40*/ @P0 BRA 0x9b0 ; /* 0xfffffe6000000947 */
/* 0x021fea000383ffff */
/*0b50*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fda0003f05270 */
/*0b60*/ @!P0 BRA 0xc50 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0b70*/ IMAD R8, R3, c[0x0][0x17c], R4 ; /* 0x00005f0003087a24 */
/* 0x000fe400078e0204 */
/*0b80*/ IMAD R4, R4, c[0x0][0x180], R0 ; /* 0x0000600004047a24 */
/* 0x000fe400078e0200 */
/*0b90*/ IMAD.WIDE R8, R8, R5, c[0x0][0x160] ; /* 0x0000580008087625 */
/* 0x000fc800078e0205 */
/*0ba0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fca00078e0205 */
/*0bb0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x0002a8000c1e1900 */
/*0bc0*/ LDG.E R3, [R8.64] ; /* 0x0000000408037981 */
/* 0x0006a2000c1e1900 */
/*0bd0*/ IADD3 R6, R6, -0x1, RZ ; /* 0xffffffff06067810 */
/* 0x000fe20007ffe0ff */
/*0be0*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff077624 */
/* 0x000fc600078e00ff */
/*0bf0*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f05270 */
/*0c00*/ IMAD.WIDE R4, R7, 0x4, R4 ; /* 0x0000000407047825 */
/* 0x002fe200078e0204 */
/*0c10*/ IADD3 R8, P1, R8, 0x4, RZ ; /* 0x0000000408087810 */
/* 0x008fca0007f3e0ff */
/*0c20*/ IMAD.X R9, RZ, RZ, R9, P1 ; /* 0x000000ffff097224 */
/* 0x000fe200008e0609 */
/*0c30*/ LOP3.LUT R23, R23, R0, R3, 0xf8, !PT ; /* 0x0000000017177212 */
/* 0x004fca00078ef803 */
/*0c40*/ @P0 BRA 0xbb0 ; /* 0xffffff6000000947 */
/* 0x000fea000383ffff */
/*0c50*/ MOV R4, c[0x0][0x188] ; /* 0x0000620000047a02 */
/* 0x000fe20000000f00 */
/*0c60*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x18c] ; /* 0x00006300ff057624 */
/* 0x000fca00078e00ff */
/*0c70*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x000ea2000c1e1900 */
/*0c80*/ LOP3.LUT R23, R23, R2, RZ, 0xfc, !PT ; /* 0x0000000217177212 */
/* 0x020fe400078efcff */
/*0c90*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x004fc80003f05270 */
/*0ca0*/ ISETP.EQ.OR P0, PT, R23, R2, P0 ; /* 0x000000021700720c */
/* 0x000fda0000702670 */
/*0cb0*/ @!P0 IMAD.MOV.U32 R3, RZ, RZ, 0x1 ; /* 0x00000001ff038424 */
/* 0x000fca00078e00ff */
/*0cc0*/ @!P0 STG.E [R4.64], R3 ; /* 0x0000000304008986 */
/* 0x000fe8000c101904 */
/*0cd0*/ STG.E [R12.64], R23 ; /* 0x000000170c007986 */
/* 0x000fe2000c101904 */
/*0ce0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0cf0*/ BRA 0xcf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0d00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include <sstream>
#include <fstream>
#include <set>
#include <iostream>
#include <map>
#include <vector>
#include <iostream>
#include <bits/stdc++.h>
#include <sstream>
#include <ctime>
#include <cstdint>
#include <stdint.h>
#define SQUEEZE 32
#define DELIMITR " "
#define IS_CHANGED 1
#define NOT_CHANGED 0
#define BLOCK_SIZE 32
using namespace std;
using String = std::string;
// Context-free grammar in (weak) Chomsky normal form: productions are either
// A -> B C (productionsDouble) or A -> t (productionsUnary), read from a text
// file with one production per line, tokens separated by a delimiter.
class Grammar {
public:
    std::set<String> nonterminalSet;
    std::set<String> terminalSet;
    // head -> set of (left, right) bodies for A -> B C productions
    std::map<String, std::set<std::pair<String, String >>> productionsDouble;
    // (left, right) body -> set of heads that produce it
    std::map<std::pair<String, String>, std::set<String >> reverseProductionsDouble;
    std::map<String, std::set<String>> productionsUnary;//NonTerminal-> Set of Terminal
    std::map<String, std::set<String>> reverseProductionsUnary;// Terminal -> Set of non terminal
    std::map<String, std::set<String>> nonTerminalToBodyOfProduction;// NonTerminal -> nonterminals that appear next to it in some production body (either side); pairing it with an element of the set gives a key of reverseProductionsDouble
    // Reads the grammar file line by line, then builds the reverse lookup
    // tables used by the closure algorithm. A file that fails to open is
    // silently ignored (the grammar stays empty).
    void parse_grammar(const String &filename, const String &delimiter = " ") {
        std::ifstream file(filename);
        if (file.is_open()) {
            std::string line;
            while (getline(file, line)) {
                process_grammar_line(line, delimiter);
            }
            file.close();
        }
        make_reverse_relations();
        make_nonTerminalToBodyOfProduction();
    }
private:
    // Populates reverseProductionsUnary and reverseProductionsDouble.
    void make_reverse_relations() {
        //reverseProductionUnary
        make_unary_reverse_relation();
        make_double_reverse_relation();
    }
    // Splits one "HEAD B C" or "HEAD t" line on the delimiter and records the
    // production; any other token count throws (a C-string literal).
    void process_grammar_line(String line, const String &delimiter = " ") {
        size_t pos = 0;
        std::string token[2];
        int c = 0;
        // consume delimiter-terminated tokens; the last token stays in `line`
        while ((pos = line.find(delimiter)) != std::string::npos) {
            token[c] = line.substr(0, pos);
            line.erase(0, pos + delimiter.length());
            c++;
        }
        String head = token[0];
        if (c == 2) {
            // A -> B C: the remainder of the line is the right nonterminal
            String left_terminal = token[1];
            String right_terminal = line;
            auto tail = make_pair(left_terminal, right_terminal);
            this->nonterminalSet.insert(head);// record all three nonterminals
            this->nonterminalSet.insert(left_terminal);
            this->nonterminalSet.insert(right_terminal);
            if (this->productionsDouble.count(head) == 1) { // productions
                auto iter = this->productionsDouble.find(head);
                iter->second.insert(tail);
            } else {
                this->productionsDouble.insert(make_pair(head, set<pair<String, String >>({tail})));
            }
        } else if (c == 1) {
            // A -> t: unary production with a single terminal
            const String &terminal = line;
            this->nonterminalSet.insert(head);
            if (this->productionsUnary.count(head) == 1) {
                auto iter = this->productionsUnary.find(head);
                iter->second.insert(terminal);
            } else {
                this->productionsUnary.insert(make_pair(head, set<String>({terminal})));
            }
            this->terminalSet.insert(terminal);
        } else {
            throw "Error while process line from grammar";
        }
    }
    // Builds terminal -> {heads} from productionsUnary.
    void make_unary_reverse_relation() {
        for (auto nonterminal: this->productionsUnary) {
            for (auto terminal: nonterminal.second) {
                if (reverseProductionsUnary.count(terminal) == 1) {
                    reverseProductionsUnary.find(terminal)->second.insert(nonterminal.first);
                } else {
                    reverseProductionsUnary.insert(make_pair(terminal, set<String>({nonterminal.first})));
                }
            }
        }
    }
    // Builds (left, right) -> {heads} from productionsDouble.
    void make_double_reverse_relation() {
        for (auto head:this->productionsDouble) {
            for (auto elem_pair:head.second) {
                if (reverseProductionsDouble.count(elem_pair) == 1) {
                    reverseProductionsDouble.find(elem_pair)->second.insert(head.first);
                } else {
                    reverseProductionsDouble.insert(make_pair(elem_pair, set<String>({head.first})));
                }
            }
        }
    }
    // For every nonterminal X, collects the nonterminals that appear next to
    // X in the body of some A -> B C production (in either position), by
    // probing every ordered pair of nonterminals against the reverse map.
    void make_nonTerminalToBodyOfProduction() {
        for (auto leftNonTerminal: nonterminalSet) {
            for (auto rightNonTerminal:nonterminalSet) {
                auto key = make_pair(leftNonTerminal, rightNonTerminal);
                if (reverseProductionsDouble.count(key)) {
                    if (nonTerminalToBodyOfProduction.count(leftNonTerminal)) {
                        nonTerminalToBodyOfProduction.find(leftNonTerminal)->second.insert(rightNonTerminal);
                    } else {
                        nonTerminalToBodyOfProduction.insert(
                                make_pair(leftNonTerminal, set<String>({rightNonTerminal})));
                    }
                    if (nonTerminalToBodyOfProduction.count(rightNonTerminal)) {
                        nonTerminalToBodyOfProduction.find(rightNonTerminal)->second.insert(leftNonTerminal);
                    } else {
                        nonTerminalToBodyOfProduction.insert(
                                make_pair(rightNonTerminal, set<String>({leftNonTerminal})));
                    }
                } else {
                }
            }
        }
    }
};
// One directed edge of the input graph, carrying the set of grammar symbols
// that label it (terminals at parse time, nonterminals after
// replace_terminals_to_noterminals runs).
class Edge {
public:
    int from;           // source vertex id
    set<String> label;  // labels attached to this edge
    int to;             // target vertex id
    Edge(int from, int to) : from(from), to(to) {}
};
// Directed labeled graph read from an edge-list file of "from label to" lines.
class Graph {
public:
    vector<Edge> edges;
    int max_number_of_vertex;  // number of vertices (max id seen + 1)
    int multiple_by_32; // is maxnumber if maxnumber % 32=0 or max_number+ (32 -maxnumber % 32)
    // Parses "from label to" triples, tracks the largest vertex id, and
    // derives both the vertex count and its round-up to a multiple of 32.
    // Throws (a C-string literal) on malformed lines or a missing file.
    void parse_graph(const String &filename, const String &delimiter = " ") {
        std::ifstream file(filename);
        int max_vertex = 0;
        if (file.is_open()) {
            std::string line;
            while (getline(file, line)) {
                size_t pos = 0;
                std::string token[2];
                int c = 0;
                // consume delimiter-terminated tokens; the last stays in `line`
                while ((pos = line.find(delimiter)) != std::string::npos) {
                    token[c] = line.substr(0, pos);
                    line.erase(0, pos + delimiter.length());
                    c++;
                }
                if (c == 2) {
                    // token[0] = from, token[1] = label, line = to
                    int l = std::stoi(token[0]);
                    int r = std::stoi(line);
                    max_vertex = std::max(std::max(l, r), max_vertex);
                    Edge edge = Edge(l, r);
                    edge.label.insert(token[1]);
                    edges.push_back(edge);
                } else {
                    throw "Error while process line from graph";
                }
            }
            file.close();
        } else{
            throw "Error File not found";
        }
        max_vertex+=1;// vertices are numbered 0..max_vertex inclusive
        max_number_of_vertex = max_vertex;
        if (max_vertex % SQUEEZE == 0) {
            multiple_by_32 = max_vertex;
        } else {
            int quout = max_vertex % SQUEEZE;
            multiple_by_32 = max_vertex + SQUEEZE - quout;
        }
    }
    // Replaces every terminal label on every edge with the set of
    // nonterminals that derive it via a unary production; labels with no
    // producing nonterminal are dropped.
    void replace_terminals_to_noterminals(Grammar &grammar) {
        for (auto &edge : edges) {
            set<String> tmp;
            for (const String &key:edge.label) {
                if (grammar.reverseProductionsUnary.count(key) == 1) {
                    tmp.insert(grammar.reverseProductionsUnary.find(key)->second.begin(),
                               grammar.reverseProductionsUnary.find(key)->second.end());
                }
            }
            edge.label.clear();
            edge.label.insert(tmp.begin(), tmp.end());
        }
    }
};
// Allocates a rows x cols matrix of uint32_t in page-locked (pinned) host
// memory so host<->device copies can use the fast DMA path. The memory is
// NOT zero-initialized. Aborts with a diagnostic on allocation failure
// instead of handing the caller an indeterminate pointer (the previous
// version ignored the cudaMallocHost return code).
uint32_t * allocate_matrix_host(int rows,int cols) {
    uint32_t *matrix;
    cudaError_t err = cudaMallocHost((void **) &matrix, sizeof(uint32_t) * rows * cols);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMallocHost(%d x %d) failed: %s\n", rows, cols,
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    return matrix;
}
// Allocates a rows x cols matrix of uint32_t in device global memory.
// The memory is NOT zero-initialized. Aborts with a diagnostic on
// allocation failure instead of handing the caller an indeterminate
// pointer (the previous version ignored the cudaMalloc return code).
uint32_t * allocate_matrix_device(int rows,int cols){
    uint32_t *matrix;
    cudaError_t err = cudaMalloc((void **) &matrix, sizeof(uint32_t) * rows * cols);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMalloc(%d x %d) failed: %s\n", rows, cols,
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    return matrix;
}
// Releases a matrix previously obtained from allocate_matrix_device.
void delete_matrix_device(uint32_t * matrix) {
    cudaFree(matrix);
}
// Releases pinned host memory previously obtained from allocate_matrix_host.
void delete_matrix_host(uint32_t * matrix) {
    cudaFreeHost(matrix);
}
//__device__ is_changed = 0;
// Boolean-semiring matrix product: c[row][col] |= OR_i (a[row][i] & b[i][col]).
// a is m x n, b is n x k, c is m x k (row-major uint32_t). Launched on a 2D
// grid, one thread per output cell, with a bounds guard for partial blocks.
// *is_changed is raised to IS_CHANGED when any cell of c gains a bit; the
// unsynchronized read-then-write on the flag is benign because every writer
// stores the same value.
__global__ void gpu_matrix_mult(uint32_t *a,uint32_t *b, uint32_t *c, int m, int n, int k,uint32_t * is_changed)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= m || col >= k)
        return;
    uint32_t previous = c[row * k + col];
    // accumulate on top of the existing cell so the product is OR-merged in
    uint32_t acc = previous;
    for (int i = 0; i < n; i++) {
        acc |= a[row * n + i] & b[i * k + col];
    }
    if (acc != previous && *is_changed == NOT_CHANGED) {
        *is_changed = IS_CHANGED;
    }
    c[row * k + col] = acc;
}
// Per-nonterminal adjacency-matrix bundle.
struct Matrix {
    uint32_t *matrix_host;           // dense matrix in pinned host memory (see allocate_matrix_host)
    uint32_t *matrix_device;         // device-side copy — never assigned in the visible code path
    uint32_t *matrix_squeezed_host;  // bit-packed variant — never assigned in the visible code path
    uint32_t *is_changed_host;       // single word: IS_CHANGED / NOT_CHANGED flag
};
// NOTE(review): semantics of these tables are not established by the
// visible code — neither field is read or written here; confirm before use.
struct Table {
    uint32_t *table_n;
    uint32_t *table_last;
};
// Orchestrates CFL-reachability: one boolean adjacency matrix per
// nonterminal, multiplied to a fixpoint on the GPU.
class Solution {
public:
    Graph graph;
    Grammar grammar;
    map<String, Matrix> nonTerminalToMatrix;  // one matrix per nonterminal
    uint32_t * extra_matrix;  // scratch buffer for multiplications whose destination aliases an operand
    Table table;  // not used in the visible code path
    // Parses the graph and the grammar, rewrites edge labels from terminals
    // to the nonterminals deriving them, then allocates and seeds one
    // matrix per nonterminal.
    Solution(const String &filename_grammar, const String &filename_graph, const String &delimiter = " ") {
        // add table size as parameter
        graph.parse_graph(filename_graph, delimiter);
        grammar.parse_grammar(filename_grammar, delimiter);
        graph.replace_terminals_to_noterminals(grammar);
        construct_and_fill_matrices_for_nonterminal_test();
    }
    // Semi-naive fixpoint iteration: repeatedly re-multiplies the matrices
    // affected by last round's changes until no matrix gains a bit.
    void compute_result() {
        // initial setup: seed the worklist with every matrix that already
        // holds at least one bit after construction
        set<String> changed_matrices = set<String>();
        for (auto &elem: nonTerminalToMatrix) {
            if (*elem.second.is_changed_host == IS_CHANGED) {
                changed_matrices.insert(elem.first);
            }
        }
        if (changed_matrices.empty()) {
            return;//
        }
        while (true) {
            set<String> new_changed_matrices = set<String>();
            for (auto &nonterminal: changed_matrices) {
                if (grammar.nonTerminalToBodyOfProduction.count(nonterminal)) {
                    auto const &possibly_second_key_set = grammar.nonTerminalToBodyOfProduction.find(
                            nonterminal)->second;
                    // re-multiply every matrix pair whose production body
                    // mentions this changed nonterminal, in either position
                    for (const auto &sec: possibly_second_key_set) {
                        auto key1 = make_pair(nonterminal, sec);
                        auto key2 = make_pair(sec, nonterminal);
                        if (grammar.reverseProductionsDouble.count(key1)) {
                            auto iter = grammar.reverseProductionsDouble.find(key1);
                            for (const auto &res: iter->second) {
                                auto is_changed = perform_matrix_mul(res, iter->first.first, iter->first.second);
                                if (is_changed) {
                                    new_changed_matrices.insert(res);
                                }
                            }
                        }
                        if (grammar.reverseProductionsDouble.count(key2)) {
                            auto iter = grammar.reverseProductionsDouble.find(key2);
                            for (const auto &res: iter->second) {
                                auto is_changed = perform_matrix_mul(res, iter->first.first, iter->first.second);
                                if (is_changed) {
                                    new_changed_matrices.insert(res);
                                }
                            }
                        }
                    }
                }
            }
            if (new_changed_matrices.empty()) {
                //copy
                break;
            } else {
                changed_matrices = new_changed_matrices;
                //update matrices
            }
            //transfer
        }
    }
private:
// не забудь здесь выставить флаги для тех матриц, в которых не нули
// void construct_and_fill_matrices_for_nonterminals() {
// int rows = this->graph.multiple_by_32;
// int cols = this->graph.multiple_by_32 / SQUEEZE; // сжимаем по строкам
// for (auto nonterminal: grammar.nonterminalSet) {
// Matrix matrix = Matrix();
// matrix.matrix_host = alloc_matrix_host_with_zeros(rows, cols);
// matrix.is_changed_host = alloc_matrix_host_with_zeros(1, 1);
// this->nonTerminalToMatrix.insert(make_pair(nonterminal, matrix));
// matrix.matrix_device = alloc_matrix_device_with_zeros(rows, cols);// на гпу
// }// заполнили нулями для хоста
// for (auto &edge:graph.edges) {
// auto i = edge.from;
// auto j = edge.to;
// for (const auto &nonterminal:edge.label) { // заполнилии 1 в i,j для матриц на метках из i в j есть этот нетерминал
// fill_squeezed_matrix(this->nonTerminalToMatrix.find(nonterminal)->second.matrix_host, i, j,
// graph.multiple_by_32);
// }
// }
// for (const auto &nonterminal: grammar.nonterminalSet) {//трансфер данные с цпу на гпу
// auto &matrix = this->nonTerminalToMatrix.find(nonterminal)->second;
// transfer_matrix_from_host_to_gpu(matrix.matrix_host, matrix.matrix_device, rows, cols);
// }
// }
void construct_and_fill_matrices_for_nonterminal_test() {
int rows = this->graph.max_number_of_vertex;
int cols = this->graph.max_number_of_vertex;
int squeezed_cols = this->graph.multiple_by_32;
for (auto nonterminal: grammar.nonterminalSet) {
Matrix matrix = Matrix();
matrix.matrix_host = allocate_matrix_host(rows,cols); //alloc_matrix_host_with_zeros(rows, cols);
// matrix.matrix_squeezed_host = new uint32_t[rows*squeezed_cols];
matrix.is_changed_host = allocate_matrix_host(1,1);
*matrix.is_changed_host = NOT_CHANGED;
this->nonTerminalToMatrix.insert(make_pair(nonterminal, matrix));
}// заполнили нулями для хоста
extra_matrix = allocate_matrix_host(cols,rows); // аллок памяти для доп матрицы
for (auto &edge:graph.edges) {
auto i = edge.from;
auto j = edge.to;
for (const auto &nonterminal:edge.label) { // заполнилии 1 в i,j для матриц на метках из i в j есть этот нетерминал
auto &matrix = this->nonTerminalToMatrix.find(nonterminal)->second;
matrix.matrix_host[i * cols + j] = 1;
//write_bit(matrix.matrix_squeezed_host,i,j,squeezed_cols);
if (*matrix.is_changed_host == NOT_CHANGED) {
*matrix.is_changed_host = IS_CHANGED;
}
}
}
}
void write_bit(uint32_t *m, int i, int j,int cols){
// m[i * cols + (j / 32)] |= (1ULL << (31 - (j % 32)));
m[i * cols + (j / 32)] |= (1 << (31 - (j % 32)));
}
inline void fill_squeezed_matrix(uint32_t *matrix, int i, int j, int size32) {
// строка ок
int cols = size32 / 32;
int position_in_number32 = (SQUEEZE - 1) - (j % SQUEEZE);
int position_in_squezzed_row = j / 32;
matrix[i * cols + position_in_squezzed_row] |= (1L << position_in_number32);
}
// uint32_t *alloc_matrix_host_with_zeros(int rows, int cols) {
// }
// uint32_t *alloc_matrix_device_with_zeros(int rows, int cols) {
// }
    // Unimplemented stub (no-op): host->device transfer currently happens
    // inline in gpu_version via cudaMemcpy.
    void transfer_matrix_from_host_to_gpu(uint32_t *host, uint32_t *device, int rows, int cols) {
        //
    }
    // Unimplemented stub (no-op): device->host transfer currently happens
    // inline in gpu_version via cudaMemcpy.
    void transfer_matrix_from_gpu_to_host(uint32_t *device, uint32_t *host, int rows, int cols) {
    }
    // Computes c |= a * b over the boolean semiring on the GPU: copies the
    // n x n operands and the change flag into fresh device buffers, launches
    // gpu_matrix_mult on a BLOCK_SIZE x BLOCK_SIZE grid that covers the
    // matrix, then copies c and the flag back and frees the buffers.
    // NOTE(review): CUDA return codes are unchecked, buffers are
    // allocated/freed on every call, and the cudaDeviceSynchronize() calls
    // around blocking cudaMemcpy are redundant.
    void gpu_version(const uint32_t *a, const uint32_t *b, uint32_t *c, int n, uint32_t *is_changed){
        // c += ab
        // cout<<"H";
        uint32_t * a_d = allocate_matrix_device(n,n);
        uint32_t * b_d = allocate_matrix_device(n,n);
        uint32_t * c_d = allocate_matrix_device(n,n);
        uint32_t * flag_device = allocate_matrix_device(1,1);
        cudaMemcpy( a_d,a, sizeof(uint32_t)*n*n, cudaMemcpyHostToDevice);
        cudaMemcpy( b_d,b, sizeof(uint32_t)*n*n, cudaMemcpyHostToDevice);
        cudaMemcpy( c_d,c, sizeof(uint32_t)*n*n, cudaMemcpyHostToDevice);
        cudaMemcpy( flag_device,is_changed, sizeof(uint32_t), cudaMemcpyHostToDevice);
        cudaDeviceSynchronize();
        dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
        dim3 dimGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE, (n + BLOCK_SIZE - 1) / BLOCK_SIZE);
        gpu_matrix_mult<<<dimGrid,dimBlock>>>(a_d,b_d, c_d, n, n, n,flag_device);
        cudaDeviceSynchronize();
        cudaMemcpy( c,c_d, sizeof(uint32_t)*n*n, cudaMemcpyDeviceToHost);
        cudaMemcpy( is_changed,flag_device, sizeof(uint32_t), cudaMemcpyDeviceToHost);
        cudaDeviceSynchronize();
        delete_matrix_device(a_d);
        delete_matrix_device(b_d);
        delete_matrix_device(c_d);
        delete_matrix_device(flag_device);
    }
// c = ab
    // c |= a * b over the boolean semiring (delegates to the GPU path).
    // Guards the dimension contract; note that only row_a is forwarded —
    // gpu_version treats all operands as n x n.
    void dummy_subring_matrix_mul(const uint32_t *a, int row_a, int col_a, const uint32_t *b, int row_b, int col_b,
                                  uint32_t *c, uint32_t *is_changed) {
        if (col_a != row_b) {
            printf("The matrices can't be multiplied with each other.\n");
            return;
        }
        gpu_version(a,b,c,row_a,is_changed);
        // Previous CPU reference implementation, kept for comparison:
        // for (int i = 0; i < row_a; i++) {
        //     for (int j = 0; j < col_b; j++) {
        //         uint32_t value = 0;
        //         for (int k = 0; k < row_b; k++) {
        //             value |= a[i * row_b + k] & b[k * col_b + j];
        //         }
        //         if (*is_changed == NOT_CHANGED && (c[i * col_b + j] | value) != c[i * col_b + j]) {
        //             *is_changed = IS_CHANGED;
        //         }
        //         c[i * col_b + j] |= value;
        //     }
        // }
    }
// perform algo
//
// allocate matrices and tables on device
//
// A = C*B
int perform_matrix_mul(const String &head, const String &left, const String &right) {
int rows = graph.max_number_of_vertex;
int cols = graph.max_number_of_vertex;
auto &A = this->nonTerminalToMatrix.at(head);
auto &C = this->nonTerminalToMatrix.at(left);
auto &B = this->nonTerminalToMatrix.at(right);
*A.is_changed_host = 0;
if (head == left) {// нужно создать доп матрицу т.к A = C
copy(C.matrix_host, C.matrix_host + rows * cols, extra_matrix);
dummy_subring_matrix_mul(extra_matrix, rows, cols, B.matrix_host, rows, cols, A.matrix_host,
A.is_changed_host);
}
if (head == right) {//нужно создать доп матрицу т.к A = B
copy(B.matrix_host, B.matrix_host + rows * cols, extra_matrix);
dummy_subring_matrix_mul(C.matrix_host, rows, cols, extra_matrix, rows, cols, A.matrix_host,
A.is_changed_host);
} else {
dummy_subring_matrix_mul(C.matrix_host, rows, cols, B.matrix_host, rows, cols, A.matrix_host,
A.is_changed_host);
}
return *A.is_changed_host;
}
};
// Entry point: argv[1] = grammar file, argv[2] = graph file, argv[3] = output
// file. Runs the closure, writes "<nonterminal> i j i j ..." per nonterminal
// (sorted) to argv[3], and prints the CPU seconds of compute_result() to stdout.
int main(int argc, char* argv[]) {
    auto solution = Solution(argv[1], argv[2], DELIMITR);
    clock_t begin = clock();
    solution.compute_result();
    clock_t end = clock();
    double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
    ifstream input(argv[3]); // NOTE(review): never used — opens the output path for reading before it is re-opened for writing below
    vector<String > res(solution.grammar.nonterminalSet.begin(),solution.grammar.nonterminalSet.end());
    sort(res.begin(),res.end());
    ofstream outputfile;
    outputfile.open(argv[3]);
    for (auto &nonterminal: res) {
        auto &matrix = solution.nonTerminalToMatrix.at(nonterminal);
        outputfile << nonterminal;
        for (int i = 0; i < solution.graph.max_number_of_vertex; i++) {
            for (int j = 0; j < solution.graph.max_number_of_vertex; j++) {
                if (matrix.matrix_host[i * solution.graph.max_number_of_vertex + j] != 0) {
                    outputfile << " " << i << " " << j;
                }
            }
        }
        outputfile << endl;
    }
    outputfile.close();
    cout<<elapsed_secs<<endl;
}
|
#include <hip/hip_runtime.h>
#include <sstream>
#include <fstream>
#include <set>
#include <iostream>
#include <map>
#include <vector>
#include <iostream>
#include <bits/stdc++.h>
#include <sstream>
#include <ctime>
#include <cstdint>
#include <stdint.h>
#define SQUEEZE 32
#define DELIMITR " "
#define IS_CHANGED 1
#define NOT_CHANGED 0
#define BLOCK_SIZE 32
using namespace std;
using String = std::string;
// Parsed CNF-style grammar: each input line is either "A B C" (A -> B C) or
// "A t" (A -> t). Builds forward and reverse lookup tables consumed by the
// matrix closure in Solution::compute_result.
class Grammar {
public:
    std::set<String> nonterminalSet;
    std::set<String> terminalSet;
    // head nonterminal -> set of (left, right) production bodies
    std::map<String, std::set<std::pair<String, String >>> productionsDouble;
    // (left, right) body -> set of head nonterminals that produce it
    std::map<std::pair<String, String>, std::set<String >> reverseProductionsDouble;
    std::map<String, std::set<String>> productionsUnary;//NonTerminal-> Set of Terminal
    std::map<String, std::set<String>> reverseProductionsUnary;// Terminal -> Set of non terminal
    std::map<String, std::set<String>> nonTerminalToBodyOfProduction;// nonterminal -> nonterminals that appear next to it in some production body
    // Reads the grammar file line by line, then derives the reverse tables.
    void parse_grammar(const String &filename, const String &delimiter = " ") {
        std::ifstream file(filename);
        if (file.is_open()) {
            std::string line;
            while (getline(file, line)) {
                process_grammar_line(line, delimiter);
            }
            file.close();
        }
        make_reverse_relations();
        make_nonTerminalToBodyOfProduction();
    }
private:
    void make_reverse_relations() {
        //reverseProductionUnary
        make_unary_reverse_relation();
        make_double_reverse_relation();
    }
    // Splits one "head left right" or "head terminal" line on the delimiter
    // and records the production. Throws a C-string on malformed lines.
    void process_grammar_line(String line, const String &delimiter = " ") {
        size_t pos = 0;
        std::string token[2];
        int c = 0;
        while ((pos = line.find(delimiter)) != std::string::npos) {
            token[c] = line.substr(0, pos);
            line.erase(0, pos + delimiter.length());
            c++;
        }
        String head = token[0];
        if (c == 2) {
            String left_terminal = token[1];
            String right_terminal = line;
            auto tail = make_pair(left_terminal, right_terminal);
            this->nonterminalSet.insert(head);// record all nonterminals of the production
            this->nonterminalSet.insert(left_terminal);
            this->nonterminalSet.insert(right_terminal);
            if (this->productionsDouble.count(head) == 1) { // existing head: extend its body set
                auto iter = this->productionsDouble.find(head);
                iter->second.insert(tail);
            } else {
                this->productionsDouble.insert(make_pair(head, set<pair<String, String >>({tail})));
            }
        } else if (c == 1) {
            const String &terminal = line;
            this->nonterminalSet.insert(head);
            if (this->productionsUnary.count(head) == 1) {
                auto iter = this->productionsUnary.find(head);
                iter->second.insert(terminal);
            } else {
                this->productionsUnary.insert(make_pair(head, set<String>({terminal})));
            }
            this->terminalSet.insert(terminal);
        } else {
            throw "Error while process line from grammar";
        }
    }
    // terminal -> heads of unary productions (inverse of productionsUnary).
    void make_unary_reverse_relation() {
        for (auto nonterminal: this->productionsUnary) {
            for (auto terminal: nonterminal.second) {
                if (reverseProductionsUnary.count(terminal) == 1) {
                    reverseProductionsUnary.find(terminal)->second.insert(nonterminal.first);
                } else {
                    reverseProductionsUnary.insert(make_pair(terminal, set<String>({nonterminal.first})));
                }
            }
        }
    }
    // (left, right) body -> heads (inverse of productionsDouble).
    void make_double_reverse_relation() {
        for (auto head:this->productionsDouble) {
            for (auto elem_pair:head.second) {
                if (reverseProductionsDouble.count(elem_pair) == 1) {
                    reverseProductionsDouble.find(elem_pair)->second.insert(head.first);
                } else {
                    reverseProductionsDouble.insert(make_pair(elem_pair, set<String>({head.first})));
                }
            }
        }
    }
    // For every ordered nonterminal pair that is some production's body, link
    // each member of the pair to the other (symmetric adjacency over bodies).
    void make_nonTerminalToBodyOfProduction() {
        for (auto leftNonTerminal: nonterminalSet) {
            for (auto rightNonTerminal:nonterminalSet) {
                auto key = make_pair(leftNonTerminal, rightNonTerminal);
                if (reverseProductionsDouble.count(key)) {
                    if (nonTerminalToBodyOfProduction.count(leftNonTerminal)) {
                        nonTerminalToBodyOfProduction.find(leftNonTerminal)->second.insert(rightNonTerminal);
                    } else {
                        nonTerminalToBodyOfProduction.insert(
                                make_pair(leftNonTerminal, set<String>({rightNonTerminal})));
                    }
                    if (nonTerminalToBodyOfProduction.count(rightNonTerminal)) {
                        nonTerminalToBodyOfProduction.find(rightNonTerminal)->second.insert(leftNonTerminal);
                    } else {
                        nonTerminalToBodyOfProduction.insert(
                                make_pair(rightNonTerminal, set<String>({leftNonTerminal})));
                    }
                } else {
                }
            }
        }
    }
};
// A labeled directed edge: from --{label set}--> to.
class Edge {
public:
    int from;          // source vertex id
    set<String> label; // labels (terminals; later replaced by nonterminals)
    int to;            // target vertex id
    // Construct an edge between two vertices with an empty label set.
    Edge(int from, int to) : from(from), to(to) {}
};
// Directed edge-labeled graph read from "from label to" lines.
class Graph {
public:
    vector<Edge> edges;
    int max_number_of_vertex; // vertex count (max id seen + 1)
    int multiple_by_32; // max_number_of_vertex rounded up to a multiple of 32
    // Parses "from label to" triples. Throws a C-string on malformed lines or
    // a missing file; also derives the vertex count and its 32-rounded size.
    void parse_graph(const String &filename, const String &delimiter = " ") {
        std::ifstream file(filename);
        int max_vertex = 0;
        if (file.is_open()) {
            std::string line;
            while (getline(file, line)) {
                size_t pos = 0;
                std::string token[2];
                int c = 0;
                while ((pos = line.find(delimiter)) != std::string::npos) {
                    token[c] = line.substr(0, pos);
                    line.erase(0, pos + delimiter.length());
                    c++;
                }
                if (c == 2) {
                    int l = std::stoi(token[0]);
                    int r = std::stoi(line);
                    max_vertex = std::max(std::max(l, r), max_vertex);
                    Edge edge = Edge(l, r);
                    edge.label.insert(token[1]);
                    edges.push_back(edge);
                } else {
                    throw "Error while process line from graph";
                }
            }
            file.close();
        } else{
            throw "Error File not found";
        }
        max_vertex+=1;// vertex ids run from 0 to max_vertex inclusive
        max_number_of_vertex = max_vertex;
        if (max_vertex % SQUEEZE == 0) {
            multiple_by_32 = max_vertex;
        } else {
            int quout = max_vertex % SQUEEZE;
            multiple_by_32 = max_vertex + SQUEEZE - quout;
        }
    }
    // Rewrites each edge's terminal labels into the set of nonterminals that
    // directly produce them (via the grammar's inverse unary productions).
    void replace_terminals_to_noterminals(Grammar &grammar) {
        for (auto &edge : edges) {
            set<String> tmp;
            for (const String &key:edge.label) {
                if (grammar.reverseProductionsUnary.count(key) == 1) {
                    tmp.insert(grammar.reverseProductionsUnary.find(key)->second.begin(),
                               grammar.reverseProductionsUnary.find(key)->second.end());
                }
            }
            edge.label.clear();
            edge.label.insert(tmp.begin(), tmp.end());
        }
    }
};
uint32_t * allocate_matrix_host(int rows,int cols) {
// allocate memory in host RAM
uint32_t *matrix;
hipHostMalloc((void **) &matrix, sizeof(uint32_t)*rows * cols, hipHostMallocDefault);
return matrix;
}
// Allocates a rows x cols uint32_t matrix in device (GPU) memory.
// NOTE(review): the hipMalloc return code is unchecked and the buffer is not
// zero-initialized; callers copy over it with hipMemcpy before use.
uint32_t * allocate_matrix_device(int rows,int cols){
    uint32_t *matrix;
    hipMalloc((void **) &matrix, sizeof(uint32_t)*rows*cols);
    return matrix;
}
// Frees a device matrix allocated with allocate_matrix_device.
void delete_matrix_device(uint32_t * matrix) {
    hipFree(matrix);
}
// Frees a pinned host matrix allocated with allocate_matrix_host.
void delete_matrix_host(uint32_t * matrix) {
    hipHostFree(matrix);
}
//__device__ is_changed = 0;
// Boolean-semiring multiply-accumulate: c |= a (AND/OR) b.
// a is m x n, b is n x k, c is m x k; one thread per output element, launched
// with 2D BLOCK_SIZE x BLOCK_SIZE blocks. Bounds-checked, so any grid that
// covers the matrix is valid. *is_changed is set to IS_CHANGED when some c
// element gained a new bit.
// Cleanup: removed the redundant second `sum = 0` assignment and the dead
// commented-out CPU reference code; declarations moved to first use.
// NOTE(review): the flag update is a racy read-modify-write across threads;
// it is benign here because every writer stores the same value (1), but an
// atomicExch would make the intent explicit.
__global__ void gpu_matrix_mult(uint32_t *a,uint32_t *b, uint32_t *c, int m, int n, int k,uint32_t * is_changed)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if( col < k && row < m)
    {
        uint32_t old_c = c[row * k + col];
        uint32_t sum = 0; // OR-accumulator over ANDed row/column words
        for(int i = 0; i < n; i++)
        {
            sum |= a[row * n + i] & b[i * k + col];
        }
        sum |= old_c;
        if(*is_changed == NOT_CHANGED && sum != old_c) {
            *is_changed = IS_CHANGED;
        }
        c[row * k + col] = sum;
    }
}
// Per-nonterminal adjacency data. The visible code path populates only
// matrix_host and is_changed_host; the other pointers are reserved for the
// bit-packed and device-resident variants (currently commented out).
struct Matrix {
    uint32_t *matrix_host;          // rows x cols host matrix, one cell per vertex pair
    uint32_t *matrix_device;        // device copy (not set by the current code path)
    uint32_t *matrix_squeezed_host; // bit-packed host matrix (not set by the current code path)
    uint32_t *is_changed_host;      // single-cell flag: IS_CHANGED / NOT_CHANGED
};
// Reserved lookup-table pair; declared as a Solution member but not used by
// any of the visible code.
struct Table {
    uint32_t *table_n;
    uint32_t *table_last;
};
class Solution {
public:
Graph graph;
Grammar grammar;
map<String, Matrix> nonTerminalToMatrix;
uint32_t * extra_matrix;
Table table;
Solution(const String &filename_grammar, const String &filename_graph, const String &delimiter = " ") {
// add table size as parameter
graph.parse_graph(filename_graph, delimiter);
grammar.parse_grammar(filename_grammar, delimiter);
graph.replace_terminals_to_noterminals(grammar);
construct_and_fill_matrices_for_nonterminal_test();
}
void compute_result() {
// initial setup
set<String> changed_matrices = set<String>();
for (auto &elem: nonTerminalToMatrix) {
if (*elem.second.is_changed_host == IS_CHANGED) {
changed_matrices.insert(elem.first);
}
}
if (changed_matrices.empty()) {
return;//
}
while (true) {
set<String> new_changed_matrices = set<String>();
for (auto &nonterminal: changed_matrices) {
if (grammar.nonTerminalToBodyOfProduction.count(nonterminal)) {
auto const &possibly_second_key_set = grammar.nonTerminalToBodyOfProduction.find(
nonterminal)->second;
// перемножаем все пары матриц, в теле которых стоит этот нетерминал если он там присутствует
for (const auto &sec: possibly_second_key_set) {
auto key1 = make_pair(nonterminal, sec);
auto key2 = make_pair(sec, nonterminal);
if (grammar.reverseProductionsDouble.count(key1)) {
auto iter = grammar.reverseProductionsDouble.find(key1);
for (const auto &res: iter->second) {
auto is_changed = perform_matrix_mul(res, iter->first.first, iter->first.second);
if (is_changed) {
new_changed_matrices.insert(res);
}
}
}
if (grammar.reverseProductionsDouble.count(key2)) {
auto iter = grammar.reverseProductionsDouble.find(key2);
for (const auto &res: iter->second) {
auto is_changed = perform_matrix_mul(res, iter->first.first, iter->first.second);
if (is_changed) {
new_changed_matrices.insert(res);
}
}
}
}
}
}
if (new_changed_matrices.empty()) {
//copy
break;
} else {
changed_matrices = new_changed_matrices;
//update matrices
}
//transfer
}
}
private:
// не забудь здесь выставить флаги для тех матриц, в которых не нули
// void construct_and_fill_matrices_for_nonterminals() {
// int rows = this->graph.multiple_by_32;
// int cols = this->graph.multiple_by_32 / SQUEEZE; // сжимаем по строкам
// for (auto nonterminal: grammar.nonterminalSet) {
// Matrix matrix = Matrix();
// matrix.matrix_host = alloc_matrix_host_with_zeros(rows, cols);
// matrix.is_changed_host = alloc_matrix_host_with_zeros(1, 1);
// this->nonTerminalToMatrix.insert(make_pair(nonterminal, matrix));
// matrix.matrix_device = alloc_matrix_device_with_zeros(rows, cols);// на гпу
// }// заполнили нулями для хоста
// for (auto &edge:graph.edges) {
// auto i = edge.from;
// auto j = edge.to;
// for (const auto &nonterminal:edge.label) { // заполнилии 1 в i,j для матриц на метках из i в j есть этот нетерминал
// fill_squeezed_matrix(this->nonTerminalToMatrix.find(nonterminal)->second.matrix_host, i, j,
// graph.multiple_by_32);
// }
// }
// for (const auto &nonterminal: grammar.nonterminalSet) {//трансфер данные с цпу на гпу
// auto &matrix = this->nonTerminalToMatrix.find(nonterminal)->second;
// transfer_matrix_from_host_to_gpu(matrix.matrix_host, matrix.matrix_device, rows, cols);
// }
// }
void construct_and_fill_matrices_for_nonterminal_test() {
int rows = this->graph.max_number_of_vertex;
int cols = this->graph.max_number_of_vertex;
int squeezed_cols = this->graph.multiple_by_32;
for (auto nonterminal: grammar.nonterminalSet) {
Matrix matrix = Matrix();
matrix.matrix_host = allocate_matrix_host(rows,cols); //alloc_matrix_host_with_zeros(rows, cols);
// matrix.matrix_squeezed_host = new uint32_t[rows*squeezed_cols];
matrix.is_changed_host = allocate_matrix_host(1,1);
*matrix.is_changed_host = NOT_CHANGED;
this->nonTerminalToMatrix.insert(make_pair(nonterminal, matrix));
}// заполнили нулями для хоста
extra_matrix = allocate_matrix_host(cols,rows); // аллок памяти для доп матрицы
for (auto &edge:graph.edges) {
auto i = edge.from;
auto j = edge.to;
for (const auto &nonterminal:edge.label) { // заполнилии 1 в i,j для матриц на метках из i в j есть этот нетерминал
auto &matrix = this->nonTerminalToMatrix.find(nonterminal)->second;
matrix.matrix_host[i * cols + j] = 1;
//write_bit(matrix.matrix_squeezed_host,i,j,squeezed_cols);
if (*matrix.is_changed_host == NOT_CHANGED) {
*matrix.is_changed_host = IS_CHANGED;
}
}
}
}
void write_bit(uint32_t *m, int i, int j,int cols){
// m[i * cols + (j / 32)] |= (1ULL << (31 - (j % 32)));
m[i * cols + (j / 32)] |= (1 << (31 - (j % 32)));
}
inline void fill_squeezed_matrix(uint32_t *matrix, int i, int j, int size32) {
// строка ок
int cols = size32 / 32;
int position_in_number32 = (SQUEEZE - 1) - (j % SQUEEZE);
int position_in_squezzed_row = j / 32;
matrix[i * cols + position_in_squezzed_row] |= (1L << position_in_number32);
}
// uint32_t *alloc_matrix_host_with_zeros(int rows, int cols) {
// }
// uint32_t *alloc_matrix_device_with_zeros(int rows, int cols) {
// }
void transfer_matrix_from_host_to_gpu(uint32_t *host, uint32_t *device, int rows, int cols) {
//
}
void transfer_matrix_from_gpu_to_host(uint32_t *device, uint32_t *host, int rows, int cols) {
}
void gpu_version(const uint32_t *a, const uint32_t *b, uint32_t *c, int n, uint32_t *is_changed){
// c += ab
// cout<<"H";
uint32_t * a_d = allocate_matrix_device(n,n);
uint32_t * b_d = allocate_matrix_device(n,n);
uint32_t * c_d = allocate_matrix_device(n,n);
uint32_t * flag_device = allocate_matrix_device(1,1);
hipMemcpy( a_d,a, sizeof(uint32_t)*n*n, hipMemcpyHostToDevice);
hipMemcpy( b_d,b, sizeof(uint32_t)*n*n, hipMemcpyHostToDevice);
hipMemcpy( c_d,c, sizeof(uint32_t)*n*n, hipMemcpyHostToDevice);
hipMemcpy( flag_device,is_changed, sizeof(uint32_t), hipMemcpyHostToDevice);
hipDeviceSynchronize();
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE, (n + BLOCK_SIZE - 1) / BLOCK_SIZE);
gpu_matrix_mult<<<dimGrid,dimBlock>>>(a_d,b_d, c_d, n, n, n,flag_device);
hipDeviceSynchronize();
hipMemcpy( c,c_d, sizeof(uint32_t)*n*n, hipMemcpyDeviceToHost);
hipMemcpy( is_changed,flag_device, sizeof(uint32_t), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
delete_matrix_device(a_d);
delete_matrix_device(b_d);
delete_matrix_device(c_d);
delete_matrix_device(flag_device);
}
// c = ab
void dummy_subring_matrix_mul(const uint32_t *a, int row_a, int col_a, const uint32_t *b, int row_b, int col_b,
uint32_t *c, uint32_t *is_changed) {
if (col_a != row_b) {
printf("The matrices can't be multiplied with each other.\n");
return;
}
gpu_version(a,b,c,row_a,is_changed);
//
// for (int i = 0; i < row_a; i++) {
//
// for (int j = 0; j < col_b; j++) {
// uint32_t value = 0;
//
// for (int k = 0; k < row_b; k++) {
// value |= a[i * row_b + k] & b[k * col_b + j];
// }
// if (*is_changed == NOT_CHANGED && (c[i * col_b + j] | value) != c[i * col_b + j]) {
// *is_changed = IS_CHANGED;
// }
// c[i * col_b + j] |= value;
// }
// }
}
// perform algo
//
// allocate matrices and tables on device
//
// A = C*B
int perform_matrix_mul(const String &head, const String &left, const String &right) {
int rows = graph.max_number_of_vertex;
int cols = graph.max_number_of_vertex;
auto &A = this->nonTerminalToMatrix.at(head);
auto &C = this->nonTerminalToMatrix.at(left);
auto &B = this->nonTerminalToMatrix.at(right);
*A.is_changed_host = 0;
if (head == left) {// нужно создать доп матрицу т.к A = C
copy(C.matrix_host, C.matrix_host + rows * cols, extra_matrix);
dummy_subring_matrix_mul(extra_matrix, rows, cols, B.matrix_host, rows, cols, A.matrix_host,
A.is_changed_host);
}
if (head == right) {//нужно создать доп матрицу т.к A = B
copy(B.matrix_host, B.matrix_host + rows * cols, extra_matrix);
dummy_subring_matrix_mul(C.matrix_host, rows, cols, extra_matrix, rows, cols, A.matrix_host,
A.is_changed_host);
} else {
dummy_subring_matrix_mul(C.matrix_host, rows, cols, B.matrix_host, rows, cols, A.matrix_host,
A.is_changed_host);
}
return *A.is_changed_host;
}
};
int main(int argc, char* argv[]) {
auto solution = Solution(argv[1], argv[2], DELIMITR);
clock_t begin = clock();
solution.compute_result();
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
ifstream input(argv[3]);
vector<String > res(solution.grammar.nonterminalSet.begin(),solution.grammar.nonterminalSet.end());
sort(res.begin(),res.end());
ofstream outputfile;
outputfile.open(argv[3]);
for (auto &nonterminal: res) {
auto &matrix = solution.nonTerminalToMatrix.at(nonterminal);
outputfile << nonterminal;
for (int i = 0; i < solution.graph.max_number_of_vertex; i++) {
for (int j = 0; j < solution.graph.max_number_of_vertex; j++) {
if (matrix.matrix_host[i * solution.graph.max_number_of_vertex + j] != 0) {
outputfile << " " << i << " " << j;
}
}
}
outputfile << endl;
}
outputfile.close();
cout<<elapsed_secs<<endl;
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
#include <sstream>
#include <fstream>
#include <set>
#include <iostream>
#include <map>
#include <vector>
#include <iostream>
#include <bits/stdc++.h>
#include <sstream>
#include <ctime>
#include <cstdint>
#include <stdint.h>
#define SQUEEZE 32
#define DELIMITR " "
#define IS_CHANGED 1
#define NOT_CHANGED 0
#define BLOCK_SIZE 32
using namespace std;
using String = std::string;
// Parsed CNF-style grammar: each input line is either "A B C" (A -> B C) or
// "A t" (A -> t). Builds forward and reverse lookup tables consumed by the
// matrix closure in Solution::compute_result.
class Grammar {
public:
    std::set<String> nonterminalSet;
    std::set<String> terminalSet;
    // head nonterminal -> set of (left, right) production bodies
    std::map<String, std::set<std::pair<String, String >>> productionsDouble;
    // (left, right) body -> set of head nonterminals that produce it
    std::map<std::pair<String, String>, std::set<String >> reverseProductionsDouble;
    std::map<String, std::set<String>> productionsUnary;//NonTerminal-> Set of Terminal
    std::map<String, std::set<String>> reverseProductionsUnary;// Terminal -> Set of non terminal
    std::map<String, std::set<String>> nonTerminalToBodyOfProduction;// nonterminal -> nonterminals that appear next to it in some production body
    // Reads the grammar file line by line, then derives the reverse tables.
    void parse_grammar(const String &filename, const String &delimiter = " ") {
        std::ifstream file(filename);
        if (file.is_open()) {
            std::string line;
            while (getline(file, line)) {
                process_grammar_line(line, delimiter);
            }
            file.close();
        }
        make_reverse_relations();
        make_nonTerminalToBodyOfProduction();
    }
private:
    void make_reverse_relations() {
        //reverseProductionUnary
        make_unary_reverse_relation();
        make_double_reverse_relation();
    }
    // Splits one "head left right" or "head terminal" line on the delimiter
    // and records the production. Throws a C-string on malformed lines.
    void process_grammar_line(String line, const String &delimiter = " ") {
        size_t pos = 0;
        std::string token[2];
        int c = 0;
        while ((pos = line.find(delimiter)) != std::string::npos) {
            token[c] = line.substr(0, pos);
            line.erase(0, pos + delimiter.length());
            c++;
        }
        String head = token[0];
        if (c == 2) {
            String left_terminal = token[1];
            String right_terminal = line;
            auto tail = make_pair(left_terminal, right_terminal);
            this->nonterminalSet.insert(head);// record all nonterminals of the production
            this->nonterminalSet.insert(left_terminal);
            this->nonterminalSet.insert(right_terminal);
            if (this->productionsDouble.count(head) == 1) { // existing head: extend its body set
                auto iter = this->productionsDouble.find(head);
                iter->second.insert(tail);
            } else {
                this->productionsDouble.insert(make_pair(head, set<pair<String, String >>({tail})));
            }
        } else if (c == 1) {
            const String &terminal = line;
            this->nonterminalSet.insert(head);
            if (this->productionsUnary.count(head) == 1) {
                auto iter = this->productionsUnary.find(head);
                iter->second.insert(terminal);
            } else {
                this->productionsUnary.insert(make_pair(head, set<String>({terminal})));
            }
            this->terminalSet.insert(terminal);
        } else {
            throw "Error while process line from grammar";
        }
    }
    // terminal -> heads of unary productions (inverse of productionsUnary).
    void make_unary_reverse_relation() {
        for (auto nonterminal: this->productionsUnary) {
            for (auto terminal: nonterminal.second) {
                if (reverseProductionsUnary.count(terminal) == 1) {
                    reverseProductionsUnary.find(terminal)->second.insert(nonterminal.first);
                } else {
                    reverseProductionsUnary.insert(make_pair(terminal, set<String>({nonterminal.first})));
                }
            }
        }
    }
    // (left, right) body -> heads (inverse of productionsDouble).
    void make_double_reverse_relation() {
        for (auto head:this->productionsDouble) {
            for (auto elem_pair:head.second) {
                if (reverseProductionsDouble.count(elem_pair) == 1) {
                    reverseProductionsDouble.find(elem_pair)->second.insert(head.first);
                } else {
                    reverseProductionsDouble.insert(make_pair(elem_pair, set<String>({head.first})));
                }
            }
        }
    }
    // For every ordered nonterminal pair that is some production's body, link
    // each member of the pair to the other (symmetric adjacency over bodies).
    void make_nonTerminalToBodyOfProduction() {
        for (auto leftNonTerminal: nonterminalSet) {
            for (auto rightNonTerminal:nonterminalSet) {
                auto key = make_pair(leftNonTerminal, rightNonTerminal);
                if (reverseProductionsDouble.count(key)) {
                    if (nonTerminalToBodyOfProduction.count(leftNonTerminal)) {
                        nonTerminalToBodyOfProduction.find(leftNonTerminal)->second.insert(rightNonTerminal);
                    } else {
                        nonTerminalToBodyOfProduction.insert(
                                make_pair(leftNonTerminal, set<String>({rightNonTerminal})));
                    }
                    if (nonTerminalToBodyOfProduction.count(rightNonTerminal)) {
                        nonTerminalToBodyOfProduction.find(rightNonTerminal)->second.insert(leftNonTerminal);
                    } else {
                        nonTerminalToBodyOfProduction.insert(
                                make_pair(rightNonTerminal, set<String>({leftNonTerminal})));
                    }
                } else {
                }
            }
        }
    }
};
// A labeled directed edge: from --{label set}--> to.
class Edge {
public:
    int from;          // source vertex id
    set<String> label; // labels (terminals; later replaced by nonterminals)
    int to;            // target vertex id
    // Construct an edge between two vertices with an empty label set.
    Edge(int from, int to) : from(from), to(to) {}
};
// Directed edge-labeled graph read from "from label to" lines.
class Graph {
public:
    vector<Edge> edges;
    int max_number_of_vertex; // vertex count (max id seen + 1)
    int multiple_by_32; // max_number_of_vertex rounded up to a multiple of 32
    // Parses "from label to" triples. Throws a C-string on malformed lines or
    // a missing file; also derives the vertex count and its 32-rounded size.
    void parse_graph(const String &filename, const String &delimiter = " ") {
        std::ifstream file(filename);
        int max_vertex = 0;
        if (file.is_open()) {
            std::string line;
            while (getline(file, line)) {
                size_t pos = 0;
                std::string token[2];
                int c = 0;
                while ((pos = line.find(delimiter)) != std::string::npos) {
                    token[c] = line.substr(0, pos);
                    line.erase(0, pos + delimiter.length());
                    c++;
                }
                if (c == 2) {
                    int l = std::stoi(token[0]);
                    int r = std::stoi(line);
                    max_vertex = std::max(std::max(l, r), max_vertex);
                    Edge edge = Edge(l, r);
                    edge.label.insert(token[1]);
                    edges.push_back(edge);
                } else {
                    throw "Error while process line from graph";
                }
            }
            file.close();
        } else{
            throw "Error File not found";
        }
        max_vertex+=1;// vertex ids run from 0 to max_vertex inclusive
        max_number_of_vertex = max_vertex;
        if (max_vertex % SQUEEZE == 0) {
            multiple_by_32 = max_vertex;
        } else {
            int quout = max_vertex % SQUEEZE;
            multiple_by_32 = max_vertex + SQUEEZE - quout;
        }
    }
    // Rewrites each edge's terminal labels into the set of nonterminals that
    // directly produce them (via the grammar's inverse unary productions).
    void replace_terminals_to_noterminals(Grammar &grammar) {
        for (auto &edge : edges) {
            set<String> tmp;
            for (const String &key:edge.label) {
                if (grammar.reverseProductionsUnary.count(key) == 1) {
                    tmp.insert(grammar.reverseProductionsUnary.find(key)->second.begin(),
                               grammar.reverseProductionsUnary.find(key)->second.end());
                }
            }
            edge.label.clear();
            edge.label.insert(tmp.begin(), tmp.end());
        }
    }
};
uint32_t * allocate_matrix_host(int rows,int cols) {
// allocate memory in host RAM
uint32_t *matrix;
hipHostMalloc((void **) &matrix, sizeof(uint32_t)*rows * cols, hipHostMallocDefault);
return matrix;
}
// Allocates a rows x cols uint32_t matrix in device (GPU) memory.
// NOTE(review): the hipMalloc return code is unchecked and the buffer is not
// zero-initialized; callers copy over it with hipMemcpy before use.
uint32_t * allocate_matrix_device(int rows,int cols){
    uint32_t *matrix;
    hipMalloc((void **) &matrix, sizeof(uint32_t)*rows*cols);
    return matrix;
}
// Frees a device matrix allocated with allocate_matrix_device.
void delete_matrix_device(uint32_t * matrix) {
    hipFree(matrix);
}
// Frees a pinned host matrix allocated with allocate_matrix_host.
void delete_matrix_host(uint32_t * matrix) {
    hipHostFree(matrix);
}
//__device__ is_changed = 0;
// Boolean-semiring multiply-accumulate: c |= a (AND/OR) b.
// a is m x n, b is n x k, c is m x k; one thread per output element, launched
// with 2D BLOCK_SIZE x BLOCK_SIZE blocks. Bounds-checked, so any grid that
// covers the matrix is valid. *is_changed is set to IS_CHANGED when some c
// element gained a new bit.
// Cleanup: removed the redundant second `sum = 0` assignment and the dead
// commented-out CPU reference code; declarations moved to first use.
// NOTE(review): the flag update is a racy read-modify-write across threads;
// it is benign here because every writer stores the same value (1), but an
// atomicExch would make the intent explicit.
__global__ void gpu_matrix_mult(uint32_t *a,uint32_t *b, uint32_t *c, int m, int n, int k,uint32_t * is_changed)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if( col < k && row < m)
    {
        uint32_t old_c = c[row * k + col];
        uint32_t sum = 0; // OR-accumulator over ANDed row/column words
        for(int i = 0; i < n; i++)
        {
            sum |= a[row * n + i] & b[i * k + col];
        }
        sum |= old_c;
        if(*is_changed == NOT_CHANGED && sum != old_c) {
            *is_changed = IS_CHANGED;
        }
        c[row * k + col] = sum;
    }
}
// Per-nonterminal adjacency data. The visible code path populates only
// matrix_host and is_changed_host; the other pointers are reserved for the
// bit-packed and device-resident variants (currently commented out).
struct Matrix {
    uint32_t *matrix_host;          // rows x cols host matrix, one cell per vertex pair
    uint32_t *matrix_device;        // device copy (not set by the current code path)
    uint32_t *matrix_squeezed_host; // bit-packed host matrix (not set by the current code path)
    uint32_t *is_changed_host;      // single-cell flag: IS_CHANGED / NOT_CHANGED
};
// Reserved lookup-table pair; declared as a Solution member but not used by
// any of the visible code.
struct Table {
    uint32_t *table_n;
    uint32_t *table_last;
};
class Solution {
public:
Graph graph;
Grammar grammar;
map<String, Matrix> nonTerminalToMatrix;
uint32_t * extra_matrix;
Table table;
Solution(const String &filename_grammar, const String &filename_graph, const String &delimiter = " ") {
// add table size as parameter
graph.parse_graph(filename_graph, delimiter);
grammar.parse_grammar(filename_grammar, delimiter);
graph.replace_terminals_to_noterminals(grammar);
construct_and_fill_matrices_for_nonterminal_test();
}
    // Fixed-point loop: for every changed nonterminal, re-multiply the matrix
    // pairs of each production whose body mentions it; repeat until a full
    // pass produces no new change.
    void compute_result() {
        // initial setup: seed the worklist with every matrix flagged changed
        set<String> changed_matrices = set<String>();
        for (auto &elem: nonTerminalToMatrix) {
            if (*elem.second.is_changed_host == IS_CHANGED) {
                changed_matrices.insert(elem.first);
            }
        }
        if (changed_matrices.empty()) {
            return;
        }
        while (true) {
            set<String> new_changed_matrices = set<String>();
            for (auto &nonterminal: changed_matrices) {
                if (grammar.nonTerminalToBodyOfProduction.count(nonterminal)) {
                    auto const &possibly_second_key_set = grammar.nonTerminalToBodyOfProduction.find(
                            nonterminal)->second;
                    // re-evaluate every production whose body contains this nonterminal, in either position
                    for (const auto &sec: possibly_second_key_set) {
                        auto key1 = make_pair(nonterminal, sec);
                        auto key2 = make_pair(sec, nonterminal);
                        if (grammar.reverseProductionsDouble.count(key1)) {
                            auto iter = grammar.reverseProductionsDouble.find(key1);
                            for (const auto &res: iter->second) {
                                auto is_changed = perform_matrix_mul(res, iter->first.first, iter->first.second);
                                if (is_changed) {
                                    new_changed_matrices.insert(res);
                                }
                            }
                        }
                        if (grammar.reverseProductionsDouble.count(key2)) {
                            auto iter = grammar.reverseProductionsDouble.find(key2);
                            for (const auto &res: iter->second) {
                                auto is_changed = perform_matrix_mul(res, iter->first.first, iter->first.second);
                                if (is_changed) {
                                    new_changed_matrices.insert(res);
                                }
                            }
                        }
                    }
                }
            }
            if (new_changed_matrices.empty()) {
                // fixed point reached
                break;
            } else {
                changed_matrices = new_changed_matrices;
            }
        }
    }
private:
// не забудь здесь выставить флаги для тех матриц, в которых не нули
// void construct_and_fill_matrices_for_nonterminals() {
// int rows = this->graph.multiple_by_32;
// int cols = this->graph.multiple_by_32 / SQUEEZE; // сжимаем по строкам
// for (auto nonterminal: grammar.nonterminalSet) {
// Matrix matrix = Matrix();
// matrix.matrix_host = alloc_matrix_host_with_zeros(rows, cols);
// matrix.is_changed_host = alloc_matrix_host_with_zeros(1, 1);
// this->nonTerminalToMatrix.insert(make_pair(nonterminal, matrix));
// matrix.matrix_device = alloc_matrix_device_with_zeros(rows, cols);// на гпу
// }// заполнили нулями для хоста
// for (auto &edge:graph.edges) {
// auto i = edge.from;
// auto j = edge.to;
// for (const auto &nonterminal:edge.label) { // заполнилии 1 в i,j для матриц на метках из i в j есть этот нетерминал
// fill_squeezed_matrix(this->nonTerminalToMatrix.find(nonterminal)->second.matrix_host, i, j,
// graph.multiple_by_32);
// }
// }
// for (const auto &nonterminal: grammar.nonterminalSet) {//трансфер данные с цпу на гпу
// auto &matrix = this->nonTerminalToMatrix.find(nonterminal)->second;
// transfer_matrix_from_host_to_gpu(matrix.matrix_host, matrix.matrix_device, rows, cols);
// }
// }
    // Builds one rows x cols host matrix (plus a one-cell change flag) per
    // nonterminal, then sets cell (i, j) = 1 for every edge i->j whose label
    // set contains that nonterminal, flagging those matrices as changed.
    // NOTE(review): cells that are never written are assumed to be zero, but
    // allocate_matrix_host does not guarantee a zeroed buffer — confirm.
    void construct_and_fill_matrices_for_nonterminal_test() {
        int rows = this->graph.max_number_of_vertex;
        int cols = this->graph.max_number_of_vertex;
        int squeezed_cols = this->graph.multiple_by_32; // currently unused (bit-packed path disabled)
        for (auto nonterminal: grammar.nonterminalSet) {
            Matrix matrix = Matrix();
            matrix.matrix_host = allocate_matrix_host(rows,cols); //alloc_matrix_host_with_zeros(rows, cols);
            // matrix.matrix_squeezed_host = new uint32_t[rows*squeezed_cols];
            matrix.is_changed_host = allocate_matrix_host(1,1);
            *matrix.is_changed_host = NOT_CHANGED;
            this->nonTerminalToMatrix.insert(make_pair(nonterminal, matrix));
        }// one host matrix per nonterminal created
        extra_matrix = allocate_matrix_host(cols,rows); // scratch matrix for aliased products
        for (auto &edge:graph.edges) {
            auto i = edge.from;
            auto j = edge.to;
            for (const auto &nonterminal:edge.label) { // set (i, j) = 1 in each matrix whose label set holds this nonterminal
                auto &matrix = this->nonTerminalToMatrix.find(nonterminal)->second;
                matrix.matrix_host[i * cols + j] = 1;
                //write_bit(matrix.matrix_squeezed_host,i,j,squeezed_cols);
                if (*matrix.is_changed_host == NOT_CHANGED) {
                    *matrix.is_changed_host = IS_CHANGED;
                }
            }
        }
    }
void write_bit(uint32_t *m, int i, int j,int cols){
// m[i * cols + (j / 32)] |= (1ULL << (31 - (j % 32)));
m[i * cols + (j / 32)] |= (1 << (31 - (j % 32)));
}
    // Sets bit j of row i in a bit-packed (squeezed) matrix; size32 is the
    // unpacked row width and is expected to be a multiple of 32 (see
    // Graph::multiple_by_32). Bits are MSB-first within each 32-bit word.
    inline void fill_squeezed_matrix(uint32_t *matrix, int i, int j, int size32) {
        // each row packs size32 / 32 words
        int cols = size32 / 32;
        int position_in_number32 = (SQUEEZE - 1) - (j % SQUEEZE);
        int position_in_squezzed_row = j / 32;
        matrix[i * cols + position_in_squezzed_row] |= (1L << position_in_number32);
    }
// uint32_t *alloc_matrix_host_with_zeros(int rows, int cols) {
// }
// uint32_t *alloc_matrix_device_with_zeros(int rows, int cols) {
// }
    // Placeholder for a future device-resident workflow; intentionally a no-op.
    void transfer_matrix_from_host_to_gpu(uint32_t *host, uint32_t *device, int rows, int cols) {
        //
    }
    // Placeholder counterpart of transfer_matrix_from_host_to_gpu; no-op.
    void transfer_matrix_from_gpu_to_host(uint32_t *device, uint32_t *host, int rows, int cols) {
    }
    // Computes c |= a * b over the boolean semiring on the GPU: copies the
    // n x n operands and the change flag into fresh device buffers, launches
    // gpu_matrix_mult on a BLOCK_SIZE x BLOCK_SIZE grid that covers the
    // matrix, then copies c and the flag back and frees the buffers.
    // NOTE(review): HIP return codes are unchecked, buffers are
    // allocated/freed on every call, and the hipDeviceSynchronize() calls
    // around blocking hipMemcpy are redundant.
    void gpu_version(const uint32_t *a, const uint32_t *b, uint32_t *c, int n, uint32_t *is_changed){
        // c += ab
        // cout<<"H";
        uint32_t * a_d = allocate_matrix_device(n,n);
        uint32_t * b_d = allocate_matrix_device(n,n);
        uint32_t * c_d = allocate_matrix_device(n,n);
        uint32_t * flag_device = allocate_matrix_device(1,1);
        hipMemcpy( a_d,a, sizeof(uint32_t)*n*n, hipMemcpyHostToDevice);
        hipMemcpy( b_d,b, sizeof(uint32_t)*n*n, hipMemcpyHostToDevice);
        hipMemcpy( c_d,c, sizeof(uint32_t)*n*n, hipMemcpyHostToDevice);
        hipMemcpy( flag_device,is_changed, sizeof(uint32_t), hipMemcpyHostToDevice);
        hipDeviceSynchronize();
        dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
        dim3 dimGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE, (n + BLOCK_SIZE - 1) / BLOCK_SIZE);
        gpu_matrix_mult<<<dimGrid,dimBlock>>>(a_d,b_d, c_d, n, n, n,flag_device);
        hipDeviceSynchronize();
        hipMemcpy( c,c_d, sizeof(uint32_t)*n*n, hipMemcpyDeviceToHost);
        hipMemcpy( is_changed,flag_device, sizeof(uint32_t), hipMemcpyDeviceToHost);
        hipDeviceSynchronize();
        delete_matrix_device(a_d);
        delete_matrix_device(b_d);
        delete_matrix_device(c_d);
        delete_matrix_device(flag_device);
    }
// c = ab
// c |= a * b over the boolean semiring (AND for multiply, OR for add),
// setting *is_changed when c gains a new bit. Validates the inner dimension,
// then delegates to the GPU path.
// NOTE(review): only row_a is forwarded to gpu_version, so this effectively
// assumes all matrices are square (row_a == col_a == row_b == col_b);
// confirm every call site passes square shapes.
// The commented-out loop below is the original CPU reference implementation.
void dummy_subring_matrix_mul(const uint32_t *a, int row_a, int col_a, const uint32_t *b, int row_b, int col_b,
uint32_t *c, uint32_t *is_changed) {
if (col_a != row_b) {
printf("The matrices can't be multiplied with each other.\n");
return;
}
gpu_version(a,b,c,row_a,is_changed);
//
// for (int i = 0; i < row_a; i++) {
//
// for (int j = 0; j < col_b; j++) {
// uint32_t value = 0;
//
// for (int k = 0; k < row_b; k++) {
// value |= a[i * row_b + k] & b[k * col_b + j];
// }
// if (*is_changed == NOT_CHANGED && (c[i * col_b + j] | value) != c[i * col_b + j]) {
// *is_changed = IS_CHANGED;
// }
// c[i * col_b + j] |= value;
// }
// }
}
// perform algo
//
// allocate matrices and tables on device
//
// A = C*B
// One fixpoint step for production head -> left right: A |= C * B over the
// boolean semiring, where A/C/B are the matrices of head/left/right.
// Returns non-zero iff A gained at least one new bit.
// When A aliases an operand (head == left or head == right) the aliased
// operand is first snapshotted into extra_matrix so the multiplication reads
// a stable copy.
// Fix: the three cases are now a mutually exclusive else-if chain. The
// original code ran BOTH the head == left branch and the trailing else
// whenever head == left but head != right, performing the multiplication
// twice per call (the second time against the already-updated A).
int perform_matrix_mul(const String &head, const String &left, const String &right) {
    int rows = graph.max_number_of_vertex;
    int cols = graph.max_number_of_vertex;
    auto &A = this->nonTerminalToMatrix.at(head);
    auto &C = this->nonTerminalToMatrix.at(left);
    auto &B = this->nonTerminalToMatrix.at(right);
    *A.is_changed_host = 0;
    if (head == left) { // A aliases C: multiply from a snapshot of C
        copy(C.matrix_host, C.matrix_host + rows * cols, extra_matrix);
        dummy_subring_matrix_mul(extra_matrix, rows, cols, B.matrix_host, rows, cols, A.matrix_host,
                                 A.is_changed_host);
    } else if (head == right) { // A aliases B: multiply from a snapshot of B
        copy(B.matrix_host, B.matrix_host + rows * cols, extra_matrix);
        dummy_subring_matrix_mul(C.matrix_host, rows, cols, extra_matrix, rows, cols, A.matrix_host,
                                 A.is_changed_host);
    } else { // no aliasing: use the operand matrices directly
        dummy_subring_matrix_mul(C.matrix_host, rows, cols, B.matrix_host, rows, cols, A.matrix_host,
                                 A.is_changed_host);
    }
    return *A.is_changed_host;
}
};
int main(int argc, char* argv[]) {
auto solution = Solution(argv[1], argv[2], DELIMITR);
clock_t begin = clock();
solution.compute_result();
clock_t end = clock();
double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
ifstream input(argv[3]);
vector<String > res(solution.grammar.nonterminalSet.begin(),solution.grammar.nonterminalSet.end());
sort(res.begin(),res.end());
ofstream outputfile;
outputfile.open(argv[3]);
for (auto &nonterminal: res) {
auto &matrix = solution.nonTerminalToMatrix.at(nonterminal);
outputfile << nonterminal;
for (int i = 0; i < solution.graph.max_number_of_vertex; i++) {
for (int j = 0; j < solution.graph.max_number_of_vertex; j++) {
if (matrix.matrix_host[i * solution.graph.max_number_of_vertex + j] != 0) {
outputfile << " " << i << " " << j;
}
}
}
outputfile << endl;
}
outputfile.close();
cout<<elapsed_secs<<endl;
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15gpu_matrix_multPjS_S_iiiS_
.globl _Z15gpu_matrix_multPjS_S_iiiS_
.p2align 8
.type _Z15gpu_matrix_multPjS_S_iiiS_,@function
_Z15gpu_matrix_multPjS_S_iiiS_:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x3c
s_load_b32 s3, s[0:1], 0x20
s_load_b32 s4, s[0:1], 0x18
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_lshr_b32 s5, s2, 16
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[2:3], null, s14, s2, v[1:2]
v_mad_u64_u32 v[3:4], null, s15, s5, v[0:1]
v_cmp_gt_i32_e32 vcc_lo, s3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s4, v3
s_and_b32 s2, s2, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s4, s2
s_cbranch_execz .LBB0_8
s_clause 0x1
s_load_b64 s[4:5], s[0:1], 0x10
s_load_b32 s2, s[0:1], 0x1c
v_mad_u64_u32 v[0:1], null, v3, s3, v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
s_cmp_lt_i32 s2, 1
global_load_b32 v6, v[0:1], off
s_cbranch_scc1 .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x0
v_mul_lo_u32 v3, v3, s2
v_mov_b32_e32 v7, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v4, 31, v3
v_lshlrev_b64 v[4:5], 2, v[3:4]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
.p2align 6
.LBB0_3:
v_ashrrev_i32_e32 v3, 31, v2
s_add_i32 s2, s2, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_eq_u32 s2, 0
v_lshlrev_b64 v[8:9], 2, v[2:3]
v_add_nc_u32_e32 v2, s3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v8, vcc_lo, s6, v8
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v9, vcc_lo
global_load_b32 v3, v[4:5], off
global_load_b32 v8, v[8:9], off
v_add_co_u32 v4, vcc_lo, v4, 4
v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo
s_waitcnt vmcnt(0)
v_and_or_b32 v7, v8, v3, v7
s_cbranch_scc0 .LBB0_3
s_branch .LBB0_5
.LBB0_4:
v_mov_b32_e32 v7, 0
.LBB0_5:
s_load_b64 s[0:1], s[0:1], 0x28
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_or_b32_e32 v2, v7, v6
v_cmp_ne_u32_e32 vcc_lo, v2, v6
s_waitcnt lgkmcnt(0)
s_load_b32 s2, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_cmp_eq_u32 s2, 0
s_cselect_b32 s2, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s3, s2, vcc_lo
s_and_saveexec_b32 s2, s3
s_cbranch_execz .LBB0_7
v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v4, 1
global_store_b32 v3, v4, s[0:1]
.LBB0_7:
s_or_b32 exec_lo, exec_lo, s2
global_store_b32 v[0:1], v2, off
.LBB0_8:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15gpu_matrix_multPjS_S_iiiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15gpu_matrix_multPjS_S_iiiS_, .Lfunc_end0-_Z15gpu_matrix_multPjS_S_iiiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15gpu_matrix_multPjS_S_iiiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15gpu_matrix_multPjS_S_iiiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : _Z15gpu_matrix_multPjS_S_iiiS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e280000002100 */
/*0030*/ S2R R3, SR_CTAID.Y ; /* 0x0000000000037919 */
/* 0x000e680000002600 */
/*0040*/ S2R R2, SR_TID.Y ; /* 0x0000000000027919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0205 */
/*0060*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x180], PT ; /* 0x0000600000007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R3, R3, c[0x0][0x4], R2 ; /* 0x0000010003037a24 */
/* 0x002fca00078e0202 */
/*0080*/ ISETP.GE.OR P0, PT, R3, c[0x0][0x178], P0 ; /* 0x00005e0003007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ IMAD.MOV.U32 R6, RZ, RZ, c[0x0][0x17c] ; /* 0x00005f00ff067624 */
/* 0x000fe200078e00ff */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ IMAD.MOV.U32 R5, RZ, RZ, 0x4 ; /* 0x00000004ff057424 */
/* 0x000fe400078e00ff */
/*00d0*/ IMAD R12, R3, c[0x0][0x180], R0 ; /* 0x00006000030c7a24 */
/* 0x000fe200078e0200 */
/*00e0*/ ISETP.GE.AND P0, PT, R6, 0x1, PT ; /* 0x000000010600780c */
/* 0x000fc60003f06270 */
/*00f0*/ IMAD.WIDE R12, R12, R5, c[0x0][0x170] ; /* 0x00005c000c0c7625 */
/* 0x000fca00078e0205 */
/*0100*/ LDG.E R2, [R12.64] ; /* 0x000000040c027981 */
/* 0x000162000c1e1900 */
/*0110*/ IMAD.MOV.U32 R23, RZ, RZ, RZ ; /* 0x000000ffff177224 */
/* 0x000fc800078e00ff */
/*0120*/ @!P0 BRA 0xc50 ; /* 0x00000b2000008947 */
/* 0x000fea0003800000 */
/*0130*/ IADD3 R4, R6.reuse, -0x1, RZ ; /* 0xffffffff06047810 */
/* 0x040fe20007ffe0ff */
/*0140*/ HFMA2.MMA R23, -RZ, RZ, 0, 0 ; /* 0x00000000ff177435 */
/* 0x000fe200000001ff */
/*0150*/ LOP3.LUT R6, R6, 0x3, RZ, 0xc0, !PT ; /* 0x0000000306067812 */
/* 0x000fe400078ec0ff */
/*0160*/ ISETP.GE.U32.AND P0, PT, R4, 0x3, PT ; /* 0x000000030400780c */
/* 0x000fe20003f06070 */
/*0170*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fd800078e00ff */
/*0180*/ @!P0 BRA 0xb50 ; /* 0x000009c000008947 */
/* 0x000fea0003800000 */
/*0190*/ IADD3 R9, -R6, c[0x0][0x17c], RZ ; /* 0x00005f0006097a10 */
/* 0x000fe20007ffe1ff */
/*01a0*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */
/* 0x000fe20000000a00 */
/*01b0*/ IMAD R7, R3, c[0x0][0x17c], RZ ; /* 0x00005f0003077a24 */
/* 0x000fe400078e02ff */
/*01c0*/ ISETP.GT.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fe20003f04270 */
/*01d0*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fe400078e00ff */
/*01e0*/ IMAD.WIDE R16, R0, R5, c[0x0][0x168] ; /* 0x00005a0000107625 */
/* 0x000fd400078e0205 */
/*01f0*/ @!P0 BRA 0x9b0 ; /* 0x000007b000008947 */
/* 0x000fea0003800000 */
/*0200*/ ISETP.GT.AND P1, PT, R9, 0xc, PT ; /* 0x0000000c0900780c */
/* 0x000fe40003f24270 */
/*0210*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*0220*/ @!P1 BRA 0x6e0 ; /* 0x000004b000009947 */
/* 0x000fea0003800000 */
/*0230*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0240*/ IMAD.U32 R14, RZ, RZ, UR6 ; /* 0x00000006ff0e7e24 */
/* 0x000fe2000f8e00ff */
/*0250*/ LDG.E R22, [R16.64] ; /* 0x0000000410167981 */
/* 0x0002a2000c1e1900 */
/*0260*/ IMAD.U32 R15, RZ, RZ, UR7 ; /* 0x00000007ff0f7e24 */
/* 0x000fe4000f8e00ff */
/*0270*/ IMAD.MOV.U32 R8, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff087624 */
/* 0x000fe400078e00ff */
/*0280*/ IMAD.WIDE R14, R7, 0x4, R14 ; /* 0x00000004070e7825 */
/* 0x000fc800078e020e */
/*0290*/ IMAD.WIDE R18, R8.reuse, 0x4, R16 ; /* 0x0000000408127825 */
/* 0x040fe200078e0210 */
/*02a0*/ LDG.E R28, [R14.64] ; /* 0x000000040e1c7981 */
/* 0x000ea8000c1e1900 */
/*02b0*/ LDG.E R10, [R18.64] ; /* 0x00000004120a7981 */
/* 0x000722000c1e1900 */
/*02c0*/ IMAD.WIDE R16, R8, 0x4, R18 ; /* 0x0000000408107825 */
/* 0x002fc600078e0212 */
/*02d0*/ LDG.E R11, [R14.64+0x4] ; /* 0x000004040e0b7981 */
/* 0x000f28000c1e1900 */
/*02e0*/ LDG.E R24, [R16.64] ; /* 0x0000000410187981 */
/* 0x000322000c1e1900 */
/*02f0*/ IMAD.WIDE R18, R8, 0x4, R16 ; /* 0x0000000408127825 */
/* 0x008fc600078e0210 */
/*0300*/ LDG.E R25, [R14.64+0x8] ; /* 0x000008040e197981 */
/* 0x000ee8000c1e1900 */
/*0310*/ LDG.E R27, [R14.64+0xc] ; /* 0x00000c040e1b7981 */
/* 0x000ee2000c1e1900 */
/*0320*/ IMAD.WIDE R20, R8, 0x4, R18 ; /* 0x0000000408147825 */
/* 0x000fc600078e0212 */
/*0330*/ LDG.E R26, [R18.64] ; /* 0x00000004121a7981 */
/* 0x0008e8000c1e1900 */
/*0340*/ LDG.E R29, [R14.64+0x10] ; /* 0x000010040e1d7981 */
/* 0x000ee8000c1e1900 */
/*0350*/ LDG.E R16, [R20.64] ; /* 0x0000000414107981 */
/* 0x0026e2000c1e1900 */
/*0360*/ LOP3.LUT R28, R23, R22, R28, 0xf8, !PT ; /* 0x00000016171c7212 */
/* 0x004fe200078ef81c */
/*0370*/ IMAD.WIDE R22, R8, 0x4, R20 ; /* 0x0000000408167825 */
/* 0x000fca00078e0214 */
/*0380*/ LDG.E R17, [R22.64] ; /* 0x0000000416117981 */
/* 0x0002a2000c1e1900 */
/*0390*/ LOP3.LUT R18, R28, R10, R11, 0xf8, !PT ; /* 0x0000000a1c127212 */
/* 0x010fe200078ef80b */
/*03a0*/ IMAD.WIDE R10, R8, 0x4, R22 ; /* 0x00000004080a7825 */
/* 0x000fe400078e0216 */
/*03b0*/ LDG.E R28, [R14.64+0x14] ; /* 0x000014040e1c7981 */
/* 0x000ea2000c1e1900 */
/*03c0*/ LOP3.LUT R20, R18, R24, R25, 0xf8, !PT ; /* 0x0000001812147212 */
/* 0x008fc600078ef819 */
/*03d0*/ IMAD.WIDE R18, R8, 0x4, R10 ; /* 0x0000000408127825 */
/* 0x000fe200078e020a */
/*03e0*/ LDG.E R25, [R14.64+0x18] ; /* 0x000018040e197981 */
/* 0x000ee8000c1e1900 */
/*03f0*/ LDG.E R24, [R10.64] ; /* 0x000000040a187981 */
/* 0x0008e4000c1e1900 */
/*0400*/ LOP3.LUT R10, R20, R26, R27, 0xf8, !PT ; /* 0x0000001a140a7212 */
/* 0x010fe200078ef81b */
/*0410*/ IMAD.WIDE R20, R8, 0x4, R18 ; /* 0x0000000408147825 */
/* 0x000fe200078e0212 */
/*0420*/ LDG.E R27, [R14.64+0x1c] ; /* 0x00001c040e1b7981 */
/* 0x000f28000c1e1900 */
/*0430*/ LDG.E R26, [R18.64] ; /* 0x00000004121a7981 */
/* 0x000122000c1e1900 */
/*0440*/ LOP3.LUT R16, R10, R16, R29, 0xf8, !PT ; /* 0x000000100a107212 */
/* 0x000fe200078ef81d */
/*0450*/ IMAD.WIDE R22, R8, 0x4, R20 ; /* 0x0000000408167825 */
/* 0x002fc400078e0214 */
/*0460*/ LDG.E R29, [R20.64] ; /* 0x00000004141d7981 */
/* 0x000328000c1e1900 */
/*0470*/ LDG.E R18, [R14.64+0x20] ; /* 0x000020040e127981 */
/* 0x001f28000c1e1900 */
/*0480*/ LDG.E R19, [R14.64+0x24] ; /* 0x000024040e137981 */
/* 0x000f28000c1e1900 */
/*0490*/ LDG.E R20, [R22.64] ; /* 0x0000000416147981 */
/* 0x002122000c1e1900 */
/*04a0*/ IMAD.WIDE R10, R8, 0x4, R22 ; /* 0x00000004080a7825 */
/* 0x000fc600078e0216 */
/*04b0*/ LDG.E R21, [R14.64+0x28] ; /* 0x000028040e157981 */
/* 0x000f22000c1e1900 */
/*04c0*/ LOP3.LUT R16, R16, R17, R28, 0xf8, !PT ; /* 0x0000001110107212 */
/* 0x004fc800078ef81c */
/*04d0*/ LOP3.LUT R24, R16, R24, R25, 0xf8, !PT ; /* 0x0000001810187212 */
/* 0x008fe200078ef819 */
/*04e0*/ IMAD.WIDE R16, R8, 0x4, R10 ; /* 0x0000000408107825 */
/* 0x000fe400078e020a */
/*04f0*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x0002a2000c1e1900 */
/*0500*/ LOP3.LUT R26, R24, R26, R27, 0xf8, !PT ; /* 0x0000001a181a7212 */
/* 0x010fc600078ef81b */
/*0510*/ IMAD.WIDE R24, R8.reuse, 0x4, R16 ; /* 0x0000000408187825 */
/* 0x040fe400078e0210 */
/*0520*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000728000c1e1900 */
/*0530*/ IMAD.WIDE R22, R8, 0x4, R24 ; /* 0x0000000408167825 */
/* 0x001fe400078e0218 */
/*0540*/ LDG.E R24, [R24.64] ; /* 0x0000000418187981 */
/* 0x000f22000c1e1900 */
/*0550*/ LOP3.LUT R18, R26, R29, R18, 0xf8, !PT ; /* 0x0000001d1a127212 */
/* 0x000fc600078ef812 */
/*0560*/ LDG.E R29, [R14.64+0x2c] ; /* 0x00002c040e1d7981 */
/* 0x000f28000c1e1900 */
/*0570*/ LDG.E R17, [R14.64+0x34] ; /* 0x000034040e117981 */
/* 0x008ee2000c1e1900 */
/*0580*/ LOP3.LUT R28, R18, R20, R19, 0xf8, !PT ; /* 0x00000014121c7212 */
/* 0x000fe200078ef813 */
/*0590*/ IMAD.WIDE R18, R8.reuse, 0x4, R22 ; /* 0x0000000408127825 */
/* 0x040fe400078e0216 */
/*05a0*/ LDG.E R20, [R14.64+0x30] ; /* 0x000030040e147981 */
/* 0x000ee8000c1e1900 */
/*05b0*/ LDG.E R22, [R22.64] ; /* 0x0000000416167981 */
/* 0x0000e2000c1e1900 */
/*05c0*/ IMAD.WIDE R26, R8, 0x4, R18 ; /* 0x00000004081a7825 */
/* 0x000fc600078e0212 */
/*05d0*/ LDG.E R11, [R18.64] ; /* 0x00000004120b7981 */
/* 0x0022e8000c1e1900 */
/*05e0*/ LDG.E R23, [R14.64+0x3c] ; /* 0x00003c040e177981 */
/* 0x001ee8000c1e1900 */
/*05f0*/ LDG.E R18, [R14.64+0x38] ; /* 0x000038040e127981 */
/* 0x002ee8000c1e1900 */
/*0600*/ LDG.E R19, [R26.64] ; /* 0x000000041a137981 */
/* 0x000ee2000c1e1900 */
/*0610*/ IADD3 R9, R9, -0x10, RZ ; /* 0xfffffff009097810 */
/* 0x000fc80007ffe0ff */
/*0620*/ ISETP.GT.AND P1, PT, R9, 0xc, PT ; /* 0x0000000c0900780c */
/* 0x000fe20003f24270 */
/*0630*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */
/* 0x000fe2000ff1e03f */
/*0640*/ IADD3 R4, R4, 0x10, RZ ; /* 0x0000001004047810 */
/* 0x000fc60007ffe0ff */
/*0650*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0660*/ LOP3.LUT R10, R28, R10, R21, 0xf8, !PT ; /* 0x0000000a1c0a7212 */
/* 0x004fc800078ef815 */
/*0670*/ LOP3.LUT R29, R10, R16, R29, 0xf8, !PT ; /* 0x000000100a1d7212 */
/* 0x010fc800078ef81d */
/*0680*/ LOP3.LUT R20, R29, R24, R20, 0xf8, !PT ; /* 0x000000181d147212 */
/* 0x008fc800078ef814 */
/*0690*/ LOP3.LUT R17, R20, R22, R17, 0xf8, !PT ; /* 0x0000001614117212 */
/* 0x000fc800078ef811 */
/*06a0*/ LOP3.LUT R18, R17, R11, R18, 0xf8, !PT ; /* 0x0000000b11127212 */
/* 0x000fe200078ef812 */
/*06b0*/ IMAD.WIDE R16, R8, 0x4, R26 ; /* 0x0000000408107825 */
/* 0x000fc600078e021a */
/*06c0*/ LOP3.LUT R23, R18, R19, R23, 0xf8, !PT ; /* 0x0000001312177212 */
/* 0x000fe200078ef817 */
/*06d0*/ @P1 BRA 0x240 ; /* 0xfffffb6000001947 */
/* 0x000fea000383ffff */
/*06e0*/ ISETP.GT.AND P1, PT, R9, 0x4, PT ; /* 0x000000040900780c */
/* 0x000fda0003f24270 */
/*06f0*/ @!P1 BRA 0x990 ; /* 0x0000029000009947 */
/* 0x000fea0003800000 */
/*0700*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */
/* 0x000fe20008000f00 */
/*0710*/ IMAD.U32 R11, RZ, RZ, UR7 ; /* 0x00000007ff0b7e24 */
/* 0x000fe2000f8e00ff */
/*0720*/ LDG.E R24, [R16.64] ; /* 0x0000000410187981 */
/* 0x0002a2000c1e1900 */
/*0730*/ IMAD.MOV.U32 R25, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff197624 */
/* 0x000fe400078e00ff */
/*0740*/ IMAD.WIDE R10, R7, 0x4, R10 ; /* 0x00000004070a7825 */
/* 0x000fc800078e020a */
/*0750*/ IMAD.WIDE R26, R25.reuse, 0x4, R16 ; /* 0x00000004191a7825 */
/* 0x040fe200078e0210 */
/*0760*/ LDG.E R8, [R10.64+0x4] ; /* 0x000004040a087981 */
/* 0x000ee8000c1e1900 */
/*0770*/ LDG.E R16, [R10.64] ; /* 0x000000040a107981 */
/* 0x002ea8000c1e1900 */
/*0780*/ LDG.E R22, [R26.64] ; /* 0x000000041a167981 */
/* 0x0002e2000c1e1900 */
/*0790*/ IMAD.WIDE R18, R25, 0x4, R26 ; /* 0x0000000419127825 */
/* 0x000fc600078e021a */
/*07a0*/ LDG.E R29, [R10.64+0x8] ; /* 0x000008040a1d7981 */
/* 0x000f26000c1e1900 */
/*07b0*/ IMAD.WIDE R20, R25.reuse, 0x4, R18 ; /* 0x0000000419147825 */
/* 0x040fe400078e0212 */
/*07c0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000128000c1e1900 */
/*07d0*/ IMAD.WIDE R14, R25, 0x4, R20 ; /* 0x00000004190e7825 */
/* 0x000fe200078e0214 */
/*07e0*/ LDG.E R28, [R20.64] ; /* 0x00000004141c7981 */
/* 0x000128000c1e1900 */
/*07f0*/ LDG.E R27, [R10.64+0x10] ; /* 0x000010040a1b7981 */
/* 0x002f28000c1e1900 */
/*0800*/ LDG.E R26, [R10.64+0x14] ; /* 0x000014040a1a7981 */
/* 0x000f22000c1e1900 */
/*0810*/ LOP3.LUT R23, R23, R24, R16, 0xf8, !PT ; /* 0x0000001817177212 */
/* 0x004fe200078ef810 */
/*0820*/ IMAD.WIDE R16, R25, 0x4, R14 ; /* 0x0000000419107825 */
/* 0x000fc400078e020e */
/*0830*/ LDG.E R24, [R10.64+0xc] ; /* 0x00000c040a187981 */
/* 0x000ea2000c1e1900 */
/*0840*/ LOP3.LUT R8, R23, R22, R8, 0xf8, !PT ; /* 0x0000001617087212 */
/* 0x008fc600078ef808 */
/*0850*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ee2000c1e1900 */
/*0860*/ IMAD.WIDE R22, R25, 0x4, R16 ; /* 0x0000000419167825 */
/* 0x000fc600078e0210 */
/*0870*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x0002e6000c1e1900 */
/*0880*/ IMAD.WIDE R20, R25, 0x4, R22 ; /* 0x0000000419147825 */
/* 0x001fe200078e0216 */
/*0890*/ LDG.E R19, [R22.64] ; /* 0x0000000416137981 */
/* 0x0000e8000c1e1900 */
/*08a0*/ LDG.E R16, [R10.64+0x18] ; /* 0x000018040a107981 */
/* 0x002ee8000c1e1900 */
/*08b0*/ LDG.E R23, [R10.64+0x1c] ; /* 0x00001c040a177981 */
/* 0x001ee8000c1e1900 */
/*08c0*/ LDG.E R22, [R20.64] ; /* 0x0000000414167981 */
/* 0x000ee2000c1e1900 */
/*08d0*/ LOP3.LUT R29, R8, R18, R29, 0xf8, !PT ; /* 0x00000012081d7212 */
/* 0x010fe200078ef81d */
/*08e0*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */
/* 0x000fe2000ff1e03f */
/*08f0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fc40003f0e170 */
/*0900*/ IADD3 R4, R4, 0x8, RZ ; /* 0x0000000804047810 */
/* 0x000fe40007ffe0ff */
/*0910*/ IADD3 R9, R9, -0x8, RZ ; /* 0xfffffff809097810 */
/* 0x000fe20007ffe0ff */
/*0920*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0930*/ LOP3.LUT R24, R29, R28, R24, 0xf8, !PT ; /* 0x0000001c1d187212 */
/* 0x004fc800078ef818 */
/*0940*/ LOP3.LUT R14, R24, R14, R27, 0xf8, !PT ; /* 0x0000000e180e7212 */
/* 0x008fc800078ef81b */
/*0950*/ LOP3.LUT R14, R14, R17, R26, 0xf8, !PT ; /* 0x000000110e0e7212 */
/* 0x000fc800078ef81a */
/*0960*/ LOP3.LUT R14, R14, R19, R16, 0xf8, !PT ; /* 0x000000130e0e7212 */
/* 0x000fe200078ef810 */
/*0970*/ IMAD.WIDE R16, R25, 0x4, R20 ; /* 0x0000000419107825 */
/* 0x000fc600078e0214 */
/*0980*/ LOP3.LUT R23, R14, R22, R23, 0xf8, !PT ; /* 0x000000160e177212 */
/* 0x000fe400078ef817 */
/*0990*/ ISETP.NE.OR P0, PT, R9, RZ, P0 ; /* 0x000000ff0900720c */
/* 0x000fda0000705670 */
/*09a0*/ @!P0 BRA 0xb50 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*09b0*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */
/* 0x000fe20008000f00 */
/*09c0*/ IMAD.U32 R10, RZ, RZ, UR6 ; /* 0x00000006ff0a7e24 */
/* 0x000fe4000f8e00ff */
/*09d0*/ IMAD.MOV.U32 R25, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff197624 */
/* 0x000fe400078e00ff */
/*09e0*/ IMAD.WIDE R10, R7, 0x4, R10 ; /* 0x00000004070a7825 */
/* 0x000fc800078e020a */
/*09f0*/ IMAD.WIDE R14, R25.reuse, 0x4, R16 ; /* 0x00000004190e7825 */
/* 0x040fe200078e0210 */
/*0a00*/ LDG.E R8, [R10.64] ; /* 0x000000040a087981 */
/* 0x000ea8000c1e1900 */
/*0a10*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000ea2000c1e1900 */
/*0a20*/ IMAD.WIDE R18, R25, 0x4, R14 ; /* 0x0000000419127825 */
/* 0x000fc600078e020e */
/*0a30*/ LDG.E R15, [R14.64] ; /* 0x000000040e0f7981 */
/* 0x000ee8000c1e1900 */
/*0a40*/ LDG.E R22, [R10.64+0x4] ; /* 0x000004040a167981 */
/* 0x000ee2000c1e1900 */
/*0a50*/ IMAD.WIDE R20, R25, 0x4, R18 ; /* 0x0000000419147825 */
/* 0x000fc600078e0212 */
/*0a60*/ LDG.E R27, [R18.64] ; /* 0x00000004121b7981 */
/* 0x000f28000c1e1900 */
/*0a70*/ LDG.E R24, [R10.64+0x8] ; /* 0x000008040a187981 */
/* 0x000f28000c1e1900 */
/*0a80*/ LDG.E R26, [R10.64+0xc] ; /* 0x00000c040a1a7981 */
/* 0x000f28000c1e1900 */
/*0a90*/ LDG.E R29, [R20.64] ; /* 0x00000004141d7981 */
/* 0x000f22000c1e1900 */
/*0aa0*/ IADD3 R9, R9, -0x4, RZ ; /* 0xfffffffc09097810 */
/* 0x000fc80007ffe0ff */
/*0ab0*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fe20003f05270 */
/*0ac0*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */
/* 0x000fe2000ff1e03f */
/*0ad0*/ IADD3 R4, R4, 0x4, RZ ; /* 0x0000000404047810 */
/* 0x000fc60007ffe0ff */
/*0ae0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0af0*/ LOP3.LUT R8, R23, R16, R8, 0xf8, !PT ; /* 0x0000001017087212 */
/* 0x004fc800078ef808 */
/*0b00*/ LOP3.LUT R8, R8, R15, R22, 0xf8, !PT ; /* 0x0000000f08087212 */
/* 0x008fe200078ef816 */
/*0b10*/ IMAD.WIDE R16, R25, 0x4, R20 ; /* 0x0000000419107825 */
/* 0x000fc600078e0214 */
/*0b20*/ LOP3.LUT R8, R8, R27, R24, 0xf8, !PT ; /* 0x0000001b08087212 */
/* 0x010fc800078ef818 */
/*0b30*/ LOP3.LUT R23, R8, R29, R26, 0xf8, !PT ; /* 0x0000001d08177212 */
/* 0x000fe200078ef81a */
/*0b40*/ @P0 BRA 0x9b0 ; /* 0xfffffe6000000947 */
/* 0x021fea000383ffff */
/*0b50*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fda0003f05270 */
/*0b60*/ @!P0 BRA 0xc50 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0b70*/ IMAD R8, R3, c[0x0][0x17c], R4 ; /* 0x00005f0003087a24 */
/* 0x000fe400078e0204 */
/*0b80*/ IMAD R4, R4, c[0x0][0x180], R0 ; /* 0x0000600004047a24 */
/* 0x000fe400078e0200 */
/*0b90*/ IMAD.WIDE R8, R8, R5, c[0x0][0x160] ; /* 0x0000580008087625 */
/* 0x000fc800078e0205 */
/*0ba0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fca00078e0205 */
/*0bb0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x0002a8000c1e1900 */
/*0bc0*/ LDG.E R3, [R8.64] ; /* 0x0000000408037981 */
/* 0x0006a2000c1e1900 */
/*0bd0*/ IADD3 R6, R6, -0x1, RZ ; /* 0xffffffff06067810 */
/* 0x000fe20007ffe0ff */
/*0be0*/ IMAD.MOV.U32 R7, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff077624 */
/* 0x000fc600078e00ff */
/*0bf0*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f05270 */
/*0c00*/ IMAD.WIDE R4, R7, 0x4, R4 ; /* 0x0000000407047825 */
/* 0x002fe200078e0204 */
/*0c10*/ IADD3 R8, P1, R8, 0x4, RZ ; /* 0x0000000408087810 */
/* 0x008fca0007f3e0ff */
/*0c20*/ IMAD.X R9, RZ, RZ, R9, P1 ; /* 0x000000ffff097224 */
/* 0x000fe200008e0609 */
/*0c30*/ LOP3.LUT R23, R23, R0, R3, 0xf8, !PT ; /* 0x0000000017177212 */
/* 0x004fca00078ef803 */
/*0c40*/ @P0 BRA 0xbb0 ; /* 0xffffff6000000947 */
/* 0x000fea000383ffff */
/*0c50*/ MOV R4, c[0x0][0x188] ; /* 0x0000620000047a02 */
/* 0x000fe20000000f00 */
/*0c60*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x18c] ; /* 0x00006300ff057624 */
/* 0x000fca00078e00ff */
/*0c70*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x000ea2000c1e1900 */
/*0c80*/ LOP3.LUT R23, R23, R2, RZ, 0xfc, !PT ; /* 0x0000000217177212 */
/* 0x020fe400078efcff */
/*0c90*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x004fc80003f05270 */
/*0ca0*/ ISETP.EQ.OR P0, PT, R23, R2, P0 ; /* 0x000000021700720c */
/* 0x000fda0000702670 */
/*0cb0*/ @!P0 IMAD.MOV.U32 R3, RZ, RZ, 0x1 ; /* 0x00000001ff038424 */
/* 0x000fca00078e00ff */
/*0cc0*/ @!P0 STG.E [R4.64], R3 ; /* 0x0000000304008986 */
/* 0x000fe8000c101904 */
/*0cd0*/ STG.E [R12.64], R23 ; /* 0x000000170c007986 */
/* 0x000fe2000c101904 */
/*0ce0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0cf0*/ BRA 0xcf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0d00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0d70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z15gpu_matrix_multPjS_S_iiiS_
.globl _Z15gpu_matrix_multPjS_S_iiiS_
.p2align 8
.type _Z15gpu_matrix_multPjS_S_iiiS_,@function
_Z15gpu_matrix_multPjS_S_iiiS_:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x3c
s_load_b32 s3, s[0:1], 0x20
s_load_b32 s4, s[0:1], 0x18
v_and_b32_e32 v1, 0x3ff, v0
v_bfe_u32 v0, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_lshr_b32 s5, s2, 16
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[2:3], null, s14, s2, v[1:2]
v_mad_u64_u32 v[3:4], null, s15, s5, v[0:1]
v_cmp_gt_i32_e32 vcc_lo, s3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s4, v3
s_and_b32 s2, s2, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s4, s2
s_cbranch_execz .LBB0_8
s_clause 0x1
s_load_b64 s[4:5], s[0:1], 0x10
s_load_b32 s2, s[0:1], 0x1c
v_mad_u64_u32 v[0:1], null, v3, s3, v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
s_cmp_lt_i32 s2, 1
global_load_b32 v6, v[0:1], off
s_cbranch_scc1 .LBB0_4
s_load_b128 s[4:7], s[0:1], 0x0
v_mul_lo_u32 v3, v3, s2
v_mov_b32_e32 v7, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v4, 31, v3
v_lshlrev_b64 v[4:5], 2, v[3:4]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v4, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
.p2align 6
.LBB0_3:
v_ashrrev_i32_e32 v3, 31, v2
s_add_i32 s2, s2, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_eq_u32 s2, 0
v_lshlrev_b64 v[8:9], 2, v[2:3]
v_add_nc_u32_e32 v2, s3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v8, vcc_lo, s6, v8
v_add_co_ci_u32_e32 v9, vcc_lo, s7, v9, vcc_lo
global_load_b32 v3, v[4:5], off
global_load_b32 v8, v[8:9], off
v_add_co_u32 v4, vcc_lo, v4, 4
v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo
s_waitcnt vmcnt(0)
v_and_or_b32 v7, v8, v3, v7
s_cbranch_scc0 .LBB0_3
s_branch .LBB0_5
.LBB0_4:
v_mov_b32_e32 v7, 0
.LBB0_5:
s_load_b64 s[0:1], s[0:1], 0x28
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_or_b32_e32 v2, v7, v6
v_cmp_ne_u32_e32 vcc_lo, v2, v6
s_waitcnt lgkmcnt(0)
s_load_b32 s2, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_cmp_eq_u32 s2, 0
s_cselect_b32 s2, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s3, s2, vcc_lo
s_and_saveexec_b32 s2, s3
s_cbranch_execz .LBB0_7
v_dual_mov_b32 v3, 0 :: v_dual_mov_b32 v4, 1
global_store_b32 v3, v4, s[0:1]
.LBB0_7:
s_or_b32 exec_lo, exec_lo, s2
global_store_b32 v[0:1], v2, off
.LBB0_8:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z15gpu_matrix_multPjS_S_iiiS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z15gpu_matrix_multPjS_S_iiiS_, .Lfunc_end0-_Z15gpu_matrix_multPjS_S_iiiS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z15gpu_matrix_multPjS_S_iiiS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z15gpu_matrix_multPjS_S_iiiS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
/* SorensonPar.cu
Parallel Implementation of Algorithm 4.1
as discussed in Sorenson and Parberry's
1994 paper "Two Fast Parallel Prime Number
Sieves".
Authors:
Daniel Anzaldo
David Frank
Antonio Lanfranchi
*/
// Visual Studio Dependencies (Can be commented out)
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// C dependencies
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
// C++ dependencies
#include <algorithm>
typedef unsigned long long big;
// GLOBAL VARIABLES--------------------------------------
typedef struct Wheel_t // Struct-of-Arrays Wheel
{
bool * rp; // Numbers relatively prime to m
big * dist; // D s.t. x + d is the smallest integer >dist[x] relatively prime to m
} Wheel_k;
bool * S; // Global shared bit array of numbers up to N
int P; // Global number of processors
bool check_cuda_status = false; // turn to false when running on circe
/* These are for tracking time */
struct timezone myTimezone;
struct timeval startTime, endTime;
// HOST FUNCTION HEADERS---------------------------------
/* gcd
Host version of the Euclidean Method
*/
__host__ big gcd(big u, big v);
/* EratosthenesSieve
HELPER: for Algorithm 4.1 Sequential Portion
The most basic form of generating primes.
Used to help find the first k primes.
Returns the k-th prime.
*/
big EratosthenesSieve(long double x);
/* Algorithm 4.1 Sequential Portion
Running Time: O(sqrt(n))
Space: O(sqrt(n)) up to O(sqrt(n)/log log n)
*/
cudaError_t algorithm4_1(big n);
/* Algorithm 4.1 Helper: Parallel Sieve
All CUDA-related functionality goes here.
This code will change for different kernel versions.
*/
cudaError_t parallelSieve(
big n, big k, big m, const Wheel_k &wheel, big range);
/* Frees the memory allocated on the device and returns any errors*/
cudaError_t cleanup(bool *d_S, Wheel_k &wheel, cudaError_t cudaStatus);
/* Set a checkpoint and show the total running time in seconds */
double report_running_time(const char *arr);
// DEVICE MATH FUNCTIONS---------------------------------
/* gcd_d
Device version of the Euclidean Method
find number c such that: a = sc, b = tc
*/
/* gcd_d
   Device version of the Euclidean method.
   Returns the greatest common divisor of a and b
   (gcd_d(a, 0) == a, matching the classic definition). */
__device__ big gcd_d(big a, big b)
{
    while (b != 0)
    {
        big r = a % b;  // remainder of the previous step
        a = b;
        b = r;
    }
    return a;
}
/* gcd_d
Device version of the Binary Method
with bit arithmetic
*/
/*
__device__ big gcd_d(big u, big v)
{
big g = 1;
while ((u % 2 == 0) && (v % 2 == 0))
{
g <<= 1;
u >>= 1;
v >>= 1;
}
while (u != 0 && v != 0)
if (u % 2 == 0) u >>= 1;
else if (v % 2 == 0) v >>= 1;
else if (u > v) u = (u - v) >> 1;
else v = (v - u) >> 1;
return (g * (u + v));
}
*/
/* sqrt_d
Device version of the Square Root Function
Babylonian Method
*/
/* sqrt_d
   Device version of the integer square root (Babylonian/Newton iteration,
   10 fixed refinement steps).

   Fixes vs. original:
   - for a < 2 the seed a/2 was 0, so `a/root` divided by zero; small inputs
     are now returned directly (sqrt(0)=0, sqrt(1)=1).
   - the update used double arithmetic (`0.5 * (...)`), which loses precision
     for large 64-bit values; pure integer arithmetic is used instead. */
__device__ big sqrt_d(big a)
{
    if (a < 2) return a;  // also guards the a/root division below

    big root = a / 2;  // initial guess
    for (big n = 0; n < 10; n++)
    {
        root = (root + a / root) / 2;
    }
    return root;
}
/* min_d: device-side minimum of two unsigned 64-bit values. */
__device__ big min_d(big a, big b)
{
    if (a < b)
        return a;
    return b;
}
/* max_d: device-side maximum of two unsigned 64-bit values. */
__device__ big max_d(big a, big b)
{
    if (a > b)
        return a;
    return b;
}
// ALGORITHM 4.1 KERNEL VERSIONS-------------------------
/* Algorithm 4.1: Parallel Sieve Kernel version 1
Parallelization: O(sqrt(n)) processors
Space: O(sqrt(n)) up to O(sqrt(n)/log log n)
PRAM Mode: Exclusive Read, Exclusive Write (EREW)
Remarks: No optimizations yet performed.
For n = 1 billion, it uses 31623 threads
*/
/* Algorithm 4.1: Parallel Sieve Kernel version 1
   Parallelization: O(sqrt(n)) processors
   Space: O(sqrt(n)) up to O(sqrt(n)/log log n)
   PRAM Mode: Exclusive Read, Exclusive Write (EREW)

   Each thread i sieves the interval [range*i + 1, min(range*(i+1), n)].
   d_wheel must contain DEVICE pointers (rp/dist tables of m residues used
   via `% m` indexing — assumes both tables cover at least m entries; TODO
   confirm against the host-side allocation, which allocates n entries).

   Fixes vs. original:
   - the multiple-removal test was inverted: `if (!(d_S[q*f])) d_S[q*f] = false;`
     only wrote false where false already stood (a no-op), so composites were
     never cleared. The test is now `if (d_S[q*f])`.
   - `q*f <= R` with R == n could write d_S[n], one element past the array
     (valid indices are 0..n-1); a `< n` guard is added. */
__global__ void parallelSieveKernel(
    big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S)
{
    big sqrt_N = sqrt_d(n);

    // Express the sieve in thread mode.
    big i = threadIdx.x + blockIdx.x * blockDim.x;

    // Threads beyond n will not do work.
    if (i <= n)
    {
        big L = range * i + 1;
        big R = min_d(range * (i + 1), n);

        /* Range Sieving: mark numbers relatively prime to m as candidates. */
        for (big x = L; x < R; x++)
            d_S[x] = d_wheel.rp[x % m];

        /* For every prime from prime[k] up to sqrt(N) */
        for (big q = k; q < sqrt_N; q++)
        {
            if (d_S[q])
            {
                /* Compute smallest f s.t.
                   gcd_d(qf, m) == 1,
                   qf >= max_d(L, q^2) */
                big f = max_d(q - 1, (big)((L / q) - 1));

                /* f = f + W_k[f mod m].dist */
                f += d_wheel.dist[f % m];

                /* Remove the multiples of the current prime.
                   NOTE(review): EREW precaution — concurrent writes from
                   neighboring threads may need an atomic; confirm. */
                while ((q * f) <= R && (q * f) < n)
                {
                    if (d_S[q * f]) d_S[q * f] = false;
                    f += d_wheel.dist[f % m];
                }
            }
        }
    }
}
/* TODO: Algorithm 4.1: Parallel Sieve Kernel version 2
Remarks: Prime table S within [0, sqrt(n)] migrated to const memory
Wheel completely migrated to const memory
Beware that const memory is only 64kB.
Benchmark with the Profiler first before creating this!
*/
__global__ void parallelSieveKernel2(
big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S);
/* TODO: Algorithm 4.1: Parallel Sieve Kernel version 3
Remarks: Prime table S within [0, sqrt(n)] migrated to const memory
Wheel completely migrated to const memory
Probable use of the shared memory
Probable use of registers
Beware that register is only 4B or 32b.
Beware that const memory is only 64kB.
Benchmark with the Profiler first before creating this!
*/
__global__ void parallelSieveKernel3(
big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S);
/* MAIN
To run this add the ff. args:
1. N = the number up to which you're sieving
*/
/* MAIN
   Usage: <program> N
   where N is the (exclusive) upper bound of the sieve.

   Fixes vs. original:
   - argv[1] was dereferenced without checking argc (crash when run bare);
   - the display loop used `int i` with the "%llu" format — a format/argument
     mismatch (undefined behavior) that also overflows for N > INT_MAX;
   - S was leaked on the CUDA-failure early return. */
int main(int argc, char **argv)
{
    if (argc < 2) {
        fprintf(stderr, "Usage: %s N\n", argv[0]);
        return 1;
    }

    big N = (big)strtoull(argv[1], NULL, 10);
    S = new bool[N]; //(bool*)malloc(N * sizeof(bool));

    printf("Find primes up to: %llu\n\n", N);

    /* start counting time */
    gettimeofday(&startTime, &myTimezone);

    cudaError_t x = algorithm4_1(N);

    /* check the total running time */
    report_running_time("Algorithm 4.1");

    if (check_cuda_status)
    {
        if (x != cudaSuccess) {
            printf("Algorithm 4.1 failed to execute!");
            delete[] S;
            return 1;
        }
    }

    // Display the primes. The index must be `big` to match "%llu" and to
    // cover the full range of N.
    for (big i = 0; i < N; i++)
        if (S[i]) printf("%llu ", i);

    delete[] S;
    return 0;
}
// HOST FUNCTION DEFINITIONS-----------------------------
// Euclidean Method
/* Host version of the Euclidean method.
   Returns the greatest common divisor of u and v (gcd(u, 0) == u). */
__host__ big gcd(big u, big v)
{
    while (v != 0)
    {
        big r = u % v;  // remainder of the previous step
        u = v;
        v = r;
    }
    return u;
}
// Binary Method
/*
__host__ big gcd(big u, big v)
{
big g = 1;
while ((u % 2 == 0) && (v % 2 == 0))
{
g <<= 1;
u >>= 1;
v >>= 1;
}
while (u != 0 && v != 0)
if (u % 2 == 0) u >>= 1;
else if (v % 2 == 0) v >>= 1;
else if (u > v) u = (u - v) >> 1;
else v = (v - u) >> 1;
return (g * (u + v));
}
*/
/* EratosthenesSieve
   HELPER: for Algorithm 4.1 Sequential Portion.
   Fills the global array S with the primality of every number below n,
   then scans downward from k.

   NOTE(review): the final loop overwrites kthPrime on every prime it meets
   while counting DOWN, so the returned value is the smallest prime in
   (2, k] — confirm this matches the paper's intent of "the k-th prime".

   Fix vs. original: the inner sieving index was `int j`, which overflows
   for i > 46340 (i*i exceeds INT_MAX) even though i itself is 64-bit;
   it is now `big`. */
big EratosthenesSieve(long double k, big n)
{
    big kthPrime = 0;

    // 0 and 1 are non-primes.
    S[0] = S[1] = false;
    for (big i = 2; i < n; i++)
        S[i] = true;

    // Simple sieving operation: clear multiples of each prime up to sqrt(n).
    for (big i = 2; i < (big)sqrtl(n); i++)
        if (S[i])
        {
            // 64-bit index: i*i must not be truncated to int.
            for (big j = i * i; j < n; j += i)
                S[j] = false;
        }

    // Scan down from k, remembering the last (i.e. smallest) prime seen.
    for (big i = k; i > 2; i--)
        if (S[i]) kthPrime = i;

    return kthPrime;
}
/* Algorithm 4.1 Sequential Portion
   Builds the k-th wheel on the host, then hands off to parallelSieve().
   Running Time: O(sqrt(n))  Space: O(sqrt(n)) up to O(sqrt(n)/log log n)
   Returns the CUDA status propagated from the parallel phase. */
cudaError_t algorithm4_1(big n)
{
/* VARIABLES */
big range;
// NOTE(review): sqrt_N is computed but never used in this function.
big sqrt_N = (big)sqrtl((long double)n);
Wheel_k wheel;
/* Allocation of wheel */
// Host-side wheel tables, one entry per number below n.
wheel.rp = new bool[n];
wheel.dist = new big[n];
/* Find the first k primes
K = maximal s.t. S[K] <= (log N) / 4
Find primes up to sqrt(N) */
big k = EratosthenesSieve(log10l((long double)n) / 4, n);
/* Find the product of the first k primes m */
// m = product of the indices ii < k with S[ii] true (the primes below k).
big m = 1;
for (big ii = 0; ii < k; ii++)
if (S[ii]) m *= ii;
/* Compute k-th wheel W_k
FUTURE OPTIMIZATION: Delegate kernel for computation */
for (big x = 0; x < n; x++)
{
// True if rp[x] is relatively prime to m
wheel.rp[x] = (gcd(x, m) == 1);
/* This is d s.t. x + d is
the smallest integer >dist[x]
relatively prime to m */
// Linear scan for the next residue coprime to m.
int d = 0;
while (gcd(x + d, m) != 1)
d++;
wheel.dist[x] = d;
}
/* Delta = ceil(n/p) */
// NOTE(review): the global P is never assigned anywhere in this file, so it
// is zero-initialized and n / (long double)P divides by 0.0 (range becomes
// inf). P must be set to the processor count before this runs — confirm.
range = (big)ceill(n / (long double)P);
/* PARALLEL PART */
cudaError_t parallelStatus = parallelSieve(n, k, m, wheel, range);
if (check_cuda_status)
{
if (parallelStatus != cudaSuccess) {
fprintf(stderr, "parallelSieve() failed!");
}
}
/* FREE */
delete[] wheel.rp;
delete[] wheel.dist;
return parallelStatus;
}
/* Algorithm 4.1 Helper: Parallel Sieve
   All CUDA-related functionality goes here.

   n     - sieve bound; wheel tables are n entries each
   k     - first pre-sieved prime index (start of kernel sieving)
   m     - wheel modulus (product of the first k primes)
   wheel - HOST-side precomputed wheel (rp: n bools, dist: n bigs)
   range - per-thread interval length (Delta = ceil(n/P))

   Returns the last CUDA status observed (errors only acted on when
   check_cuda_status is set, matching the rest of the file).

   FIX vs. original: the kernel was launched with the HOST struct `wheel`,
   whose rp/dist members are host pointers — dereferencing them on the
   device is an illegal access. The launch now passes `d_wheel`, whose
   members were allocated with cudaMalloc and filled below. */
cudaError_t parallelSieve(
    big n, big k, big m, const Wheel_k &wheel, big range)
{
    cudaError_t cudaStatus;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    /* The Number Field S lives in device GLOBAL memory.
       OPTIMIZATION: ranges could move to SHARED memory,
       [0, sqrt(n)] to CONSTANT memory. */
    bool * d_S = NULL;

    // Device-side copies of the wheel tables (GLOBAL memory).
    Wheel_k d_wheel;
    d_wheel.rp = NULL;
    d_wheel.dist = NULL;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?\n");
            return cudaStatus;
        }
    }

    // Measure start time for CUDA portion
    cudaEventRecord(start, 0);

    // CUDA Memory Allocations.
    cudaStatus = cudaMalloc((void**)&d_S, n * sizeof(bool));
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed on number field S!\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    cudaStatus = cudaMalloc((void**)&(d_wheel.rp), n * sizeof(bool));
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed on wheel.rp!\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    cudaStatus = cudaMalloc((void**)&(d_wheel.dist), n * sizeof(big));
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed on wheel.dist!\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    // cudaMemCpy -> Device
    cudaStatus = cudaMemcpy(d_S, S, n * sizeof(bool), cudaMemcpyHostToDevice);
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed! S->d_S.\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    cudaStatus = cudaMemcpy(d_wheel.rp, wheel.rp, n * sizeof(bool), cudaMemcpyHostToDevice);
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed! wheel.rp->d_wheel.rp\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    cudaStatus = cudaMemcpy(d_wheel.dist, wheel.dist, n * sizeof(big), cudaMemcpyHostToDevice);
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed! wheel.dist->d_wheel.dist\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    // Kernel Call: ~sqrt(n) threads, 256 per block (O(sqrt n) processors).
    dim3 gridSize(ceill(ceill(sqrt(n))/256), 1, 1);
    dim3 blockSize(256, 1, 1);
    // FIX: pass the device-side wheel, not the host-side one.
    parallelSieveKernel<<<gridSize, blockSize>>>(n, k, m, d_wheel, range, d_S);

    cudaStatus = cudaGetLastError();
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "parallelSieveKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    cudaStatus = cudaDeviceSynchronize();
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    // cudaMemCpy -> Host
    cudaStatus = cudaMemcpy(S, d_S, n * sizeof(bool), cudaMemcpyDeviceToHost);
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed! d_S->S.\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    // Measure stop time for CUDA portion
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Time to generate: %0.5f ms\n", elapsedTime);

    // cudaFree
    return cleanup(d_S, d_wheel, cudaStatus);
}
/* Frees the device allocations (number field and both wheel tables) and
   passes the supplied status straight through so callers can
   `return cleanup(...)` with the error they observed. */
cudaError_t cleanup(bool *d_S, Wheel_k &wheel, cudaError_t cudaStatus)
{
    // cudaFree(NULL) is a no-op, so partially-allocated state is fine.
    cudaFree(wheel.dist);
    cudaFree(wheel.rp);
    cudaFree(d_S);
    return cudaStatus;
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
/*
  Set a checkpoint and report the (natural) running time since startTime,
  in seconds, both on stdout and as the return value.
*/
double report_running_time(const char *arr) {
    gettimeofday(&endTime, &myTimezone);

    long sec_diff  = endTime.tv_sec  - startTime.tv_sec;
    long usec_diff = endTime.tv_usec - startTime.tv_usec;
    if (usec_diff < 0) {
        // Borrow one second so the microsecond part stays non-negative.
        usec_diff += 1000000;
        sec_diff  -= 1;
    }

    printf("Running time for %s: %ld.%06ld sec\n\n", arr, sec_diff, usec_diff);
    return sec_diff * 1.0 + usec_diff / 1000000.0;
}
|
.file "tmpxft_00195b50_00000000-6_SorensonPar.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2350:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2350:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z5gcd_dyy
.type _Z5gcd_dyy, @function
_Z5gcd_dyy:
.LFB2337:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2337:
.size _Z5gcd_dyy, .-_Z5gcd_dyy
.globl _Z6sqrt_dy
.type _Z6sqrt_dy, @function
_Z6sqrt_dy:
.LFB2338:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2338:
.size _Z6sqrt_dy, .-_Z6sqrt_dy
.globl _Z5min_dyy
.type _Z5min_dyy, @function
_Z5min_dyy:
.LFB2339:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2339:
.size _Z5min_dyy, .-_Z5min_dyy
.globl _Z5max_dyy
.type _Z5max_dyy, @function
_Z5max_dyy:
.LFB2340:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2340:
.size _Z5max_dyy, .-_Z5max_dyy
.globl _Z3gcdyy
.type _Z3gcdyy, @function
_Z3gcdyy:
.LFB2342:
.cfi_startproc
endbr64
movq %rdi, %rax
testq %rsi, %rsi
je .L14
.L13:
movq %rsi, %rcx
movl $0, %edx
divq %rsi
movq %rdx, %rsi
movq %rcx, %rax
testq %rdx, %rdx
jne .L13
.L11:
movq %rcx, %rax
ret
.L14:
movq %rdi, %rcx
jmp .L11
.cfi_endproc
.LFE2342:
.size _Z3gcdyy, .-_Z3gcdyy
.globl _Z17EratosthenesSieveey
.type _Z17EratosthenesSieveey, @function
_Z17EratosthenesSieveey:
.LFB2343:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $16, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %rbx
movq S(%rip), %rax
movb $0, 1(%rax)
movq S(%rip), %rax
movb $0, (%rax)
cmpq $2, %rdi
jbe .L17
movl $2, %eax
.L18:
movq S(%rip), %rdx
movb $1, (%rdx,%rax)
addq $1, %rax
cmpq %rax, %rbx
jne .L18
.L17:
movl $2, %ebp
movabsq $-9223372036854775808, %r12
jmp .L19
.L44:
movl %ebp, %esi
movl %ebp, %edx
imull %ebp, %edx
movslq %edx, %rax
cmpq %rbx, %rax
jnb .L20
.L21:
movq S(%rip), %rcx
movb $0, (%rcx,%rax)
leal (%rdx,%rsi), %eax
movl %eax, %edx
cltq
cmpq %rbx, %rax
jb .L21
jmp .L20
.L45:
fadds .LC0(%rip)
jmp .L22
.L40:
leaq -16(%rsp), %rsp
.cfi_def_cfa_offset 64
fstpt (%rsp)
call sqrtl@PLT
addq $16, %rsp
.cfi_def_cfa_offset 48
jmp .L25
.L27:
fsubs .LC2(%rip)
fnstcw 14(%rsp)
movzwl 14(%rsp), %eax
orb $12, %ah
movw %ax, 12(%rsp)
fldcw 12(%rsp)
fistpq (%rsp)
fldcw 14(%rsp)
movq (%rsp), %rax
xorq %r12, %rax
.L28:
cmpq %rax, %rbp
jnb .L43
movq S(%rip), %rax
cmpb $0, (%rax,%rbp)
jne .L44
.L20:
addq $1, %rbp
.L19:
movq %rbx, (%rsp)
fildq (%rsp)
testq %rbx, %rbx
js .L45
.L22:
fldz
fucomip %st(1), %st
ja .L40
fsqrt
.L25:
flds .LC2(%rip)
fxch %st(1)
fcomi %st(1), %st
fstp %st(1)
jnb .L27
fnstcw 14(%rsp)
movzwl 14(%rsp), %eax
orb $12, %ah
movw %ax, 12(%rsp)
fldcw 12(%rsp)
fistpq (%rsp)
fldcw 14(%rsp)
movq (%rsp), %rax
jmp .L28
.L43:
flds .LC2(%rip)
fldt 48(%rsp)
fcomi %st(1), %st
fstp %st(1)
jnb .L29
fldcw 12(%rsp)
fistpq (%rsp)
fldcw 14(%rsp)
movq (%rsp), %rax
.L30:
cmpq $2, %rax
jbe .L34
movq S(%rip), %rcx
movl $0, %edx
.L33:
cmpb $0, (%rcx,%rax)
cmovne %rax, %rdx
subq $1, %rax
cmpq $2, %rax
jne .L33
.L41:
movq %rdx, %rax
addq $16, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L29:
.cfi_restore_state
fstp %st(0)
fldt 48(%rsp)
fsubs .LC2(%rip)
fldcw 12(%rsp)
fistpq (%rsp)
fldcw 14(%rsp)
movq (%rsp), %rax
btcq $63, %rax
jmp .L30
.L34:
movl $0, %edx
jmp .L41
.cfi_endproc
.LFE2343:
.size _Z17EratosthenesSieveey, .-_Z17EratosthenesSieveey
.globl _Z7cleanupPbR7Wheel_t9cudaError
.type _Z7cleanupPbR7Wheel_t9cudaError, @function
_Z7cleanupPbR7Wheel_t9cudaError:
.LFB2346:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rsi, %rbx
movl %edx, %ebp
call cudaFree@PLT
movq (%rbx), %rdi
call cudaFree@PLT
movq 8(%rbx), %rdi
call cudaFree@PLT
movl %ebp, %eax
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2346:
.size _Z7cleanupPbR7Wheel_t9cudaError, .-_Z7cleanupPbR7Wheel_t9cudaError
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC5:
.string "Running time for %s: %ld.%06ld sec\n\n"
.text
.globl _Z19report_running_timePKc
.type _Z19report_running_timePKc, @function
_Z19report_running_timePKc:
.LFB2347:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdi, %r12
leaq myTimezone(%rip), %rsi
leaq endTime(%rip), %rdi
call gettimeofday@PLT
movq endTime(%rip), %rbp
subq startTime(%rip), %rbp
movq 8+endTime(%rip), %rbx
subq 8+startTime(%rip), %rbx
js .L51
.L49:
movq %rbx, %r8
movq %rbp, %rcx
movq %r12, %rdx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtsi2sdq %rbx, %xmm0
divsd .LC6(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq %rbp, %xmm1
addsd %xmm1, %xmm0
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L51:
.cfi_restore_state
subq $1, %rbp
addq $1000000, %rbx
jmp .L49
.cfi_endproc
.LFE2347:
.size _Z19report_running_timePKc, .-_Z19report_running_timePKc
.globl _Z51__device_stub__Z19parallelSieveKernelyyy7Wheel_tyPbyyyR7Wheel_tyPb
.type _Z51__device_stub__Z19parallelSieveKernelyyy7Wheel_tyPbyyyR7Wheel_tyPb, @function
_Z51__device_stub__Z19parallelSieveKernelyyy7Wheel_tyPbyyyR7Wheel_tyPb:
.LFB2372:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %r8, 16(%rsp)
movq %r9, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
movq %rcx, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L56
.L52:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L57
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L56:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z19parallelSieveKernelyyy7Wheel_tyPb(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L52
.L57:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2372:
.size _Z51__device_stub__Z19parallelSieveKernelyyy7Wheel_tyPbyyyR7Wheel_tyPb, .-_Z51__device_stub__Z19parallelSieveKernelyyy7Wheel_tyPbyyyR7Wheel_tyPb
.globl _Z19parallelSieveKernelyyy7Wheel_tyPb
.type _Z19parallelSieveKernelyyy7Wheel_tyPb, @function
_Z19parallelSieveKernelyyy7Wheel_tyPb:
.LFB2373:
.cfi_startproc
endbr64
subq $24, %rsp
.cfi_def_cfa_offset 32
movq %rcx, (%rsp)
movq %r8, 8(%rsp)
movq %r9, %r8
movq %rsp, %rcx
movq 32(%rsp), %r9
call _Z51__device_stub__Z19parallelSieveKernelyyy7Wheel_tyPbyyyR7Wheel_tyPb
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2373:
.size _Z19parallelSieveKernelyyy7Wheel_tyPb, .-_Z19parallelSieveKernelyyy7Wheel_tyPb
.section .rodata.str1.8
.align 8
.LC7:
.string "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n"
.align 8
.LC8:
.string "cudaMalloc failed on number field S!\n"
.align 8
.LC9:
.string "cudaMalloc failed on wheel.rp!\n"
.align 8
.LC10:
.string "cudaMalloc failed on wheel.dist!\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC11:
.string "cudaMemcpy failed! S->d_S.\n"
.section .rodata.str1.8
.align 8
.LC12:
.string "cudaMemcpy failed! wheel.rp->d_wheel.rp\n"
.align 8
.LC13:
.string "cudaMemcpy failed! wheel.dist->d_wheel.dist\n"
.align 8
.LC19:
.string "parallelSieveKernel launch failed: %s\n"
.align 8
.LC20:
.string "cudaDeviceSynchronize returned error code %d after launching addKernel!\n"
.section .rodata.str1.1
.LC21:
.string "cudaMemcpy failed! d_S->S.\n"
.LC22:
.string "Time to generate: %0.5f ms\n"
.text
.globl _Z13parallelSieveyyyRK7Wheel_ty
.type _Z13parallelSieveyyyRK7Wheel_ty, @function
_Z13parallelSieveyyyRK7Wheel_ty:
.LFB2345:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $136, %rsp
.cfi_def_cfa_offset 192
movq %rdi, %rbx
movq %rsi, 8(%rsp)
movq %rdx, %r12
movq %rcx, %rbp
movq %r8, %r13
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
movq $0, 48(%rsp)
movq $0, 80(%rsp)
movq $0, 88(%rsp)
movl $0, %edi
call cudaSetDevice@PLT
cmpb $0, check_cuda_status(%rip)
je .L61
movl %eax, %r14d
testl %eax, %eax
jne .L84
.L61:
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
leaq 48(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl %eax, %r14d
cmpb $0, check_cuda_status(%rip)
je .L63
testl %eax, %eax
jne .L85
.L63:
leaq 80(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl %eax, %r14d
cmpb $0, check_cuda_status(%rip)
je .L64
testl %eax, %eax
jne .L86
.L64:
leaq 0(,%rbx,8), %r15
leaq 88(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movl %eax, %r14d
cmpb $0, check_cuda_status(%rip)
je .L65
testl %eax, %eax
jne .L87
.L65:
movl $1, %ecx
movq %rbx, %rdx
movq S(%rip), %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %r14d
cmpb $0, check_cuda_status(%rip)
je .L66
testl %eax, %eax
jne .L88
.L66:
movq 0(%rbp), %rsi
movl $1, %ecx
movq %rbx, %rdx
movq 80(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %r14d
cmpb $0, check_cuda_status(%rip)
je .L67
testl %eax, %eax
jne .L89
.L67:
movq 8(%rbp), %rsi
movl $1, %ecx
movq %r15, %rdx
movq 88(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %r14d
cmpb $0, check_cuda_status(%rip)
je .L68
testl %eax, %eax
jne .L90
.L68:
testq %rbx, %rbx
js .L69
pxor %xmm0, %xmm0
cvtsi2sdq %rbx, %xmm0
.L70:
pxor %xmm1, %xmm1
ucomisd %xmm0, %xmm1
ja .L81
sqrtsd %xmm0, %xmm0
.L73:
movsd %xmm0, 16(%rsp)
movsd .LC23(%rip), %xmm2
movapd %xmm0, %xmm1
andpd %xmm2, %xmm1
movsd .LC15(%rip), %xmm3
ucomisd %xmm1, %xmm3
jbe .L74
cvttsd2siq %xmm0, %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
movapd %xmm0, %xmm3
cmpnlesd %xmm1, %xmm3
movsd .LC17(%rip), %xmm4
andpd %xmm4, %xmm3
addsd %xmm3, %xmm1
andnpd %xmm0, %xmm2
orpd %xmm2, %xmm1
movsd %xmm1, 16(%rsp)
.L74:
flds .LC18(%rip)
fmull 16(%rsp)
fnstcw 30(%rsp)
movzwl 30(%rsp), %eax
andb $-13, %ah
orb $8, %ah
movw %ax, 28(%rsp)
fldcw 28(%rsp)
frndint
fldcw 30(%rsp)
movzwl 30(%rsp), %eax
orb $12, %ah
movw %ax, 26(%rsp)
fldcw 26(%rsp)
fistpq 16(%rsp)
fldcw 30(%rsp)
movq 16(%rsp), %rax
movl %eax, 56(%rsp)
movl $1, 60(%rsp)
movl $256, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 68(%rsp), %rdx
movl $1, %ecx
movq 56(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L91
.L75:
call cudaGetLastError@PLT
movl %eax, %ebp
cmpb $0, check_cuda_status(%rip)
je .L76
testl %eax, %eax
jne .L92
.L76:
call cudaDeviceSynchronize@PLT
movl %eax, %ebp
cmpb $0, check_cuda_status(%rip)
je .L77
testl %eax, %eax
jne .L93
.L77:
movl $2, %ecx
movq %rbx, %rdx
movq 48(%rsp), %rsi
movq S(%rip), %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
cmpb $0, check_cuda_status(%rip)
je .L78
testl %eax, %eax
jne .L94
.L78:
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 96(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 96(%rsp), %xmm0
leaq .LC22(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
leaq 80(%rsp), %rsi
movl %ebx, %edx
movq 48(%rsp), %rdi
call _Z7cleanupPbR7Wheel_t9cudaError
movl %eax, %r14d
jmp .L60
.L84:
leaq .LC7(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L60
.L85:
leaq .LC8(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
leaq 80(%rsp), %rsi
movl %r14d, %edx
movq 48(%rsp), %rdi
call _Z7cleanupPbR7Wheel_t9cudaError
movl %eax, %r14d
.L60:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L95
movl %r14d, %eax
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L86:
.cfi_restore_state
leaq .LC9(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
leaq 80(%rsp), %rsi
movl %r14d, %edx
movq 48(%rsp), %rdi
call _Z7cleanupPbR7Wheel_t9cudaError
movl %eax, %r14d
jmp .L60
.L87:
leaq .LC10(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
leaq 80(%rsp), %rsi
movl %r14d, %edx
movq 48(%rsp), %rdi
call _Z7cleanupPbR7Wheel_t9cudaError
movl %eax, %r14d
jmp .L60
.L88:
leaq .LC11(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
leaq 80(%rsp), %rsi
movl %r14d, %edx
movq 48(%rsp), %rdi
call _Z7cleanupPbR7Wheel_t9cudaError
movl %eax, %r14d
jmp .L60
.L89:
leaq .LC12(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
leaq 80(%rsp), %rsi
movl %r14d, %edx
movq 48(%rsp), %rdi
call _Z7cleanupPbR7Wheel_t9cudaError
movl %eax, %r14d
jmp .L60
.L90:
leaq .LC13(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
leaq 80(%rsp), %rsi
movl %r14d, %edx
movq 48(%rsp), %rdi
call _Z7cleanupPbR7Wheel_t9cudaError
movl %eax, %r14d
jmp .L60
.L69:
movq %rbx, %rax
shrq %rax
movq %rbx, %rdx
andl $1, %edx
orq %rdx, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
addsd %xmm0, %xmm0
jmp .L70
.L81:
call sqrt@PLT
jmp .L73
.L91:
movdqu 0(%rbp), %xmm5
movaps %xmm5, 96(%rsp)
leaq 96(%rsp), %rcx
movq 48(%rsp), %r9
movq %r13, %r8
movq %r12, %rdx
movq 8(%rsp), %rsi
movq %rbx, %rdi
call _Z51__device_stub__Z19parallelSieveKernelyyy7Wheel_tyPbyyyR7Wheel_tyPb
jmp .L75
.L92:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rcx
leaq .LC19(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
leaq 80(%rsp), %rsi
movl %ebp, %edx
movq 48(%rsp), %rdi
call _Z7cleanupPbR7Wheel_t9cudaError
movl %eax, %r14d
jmp .L60
.L93:
movl %eax, %ecx
leaq .LC20(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
leaq 80(%rsp), %rsi
movl %ebp, %edx
movq 48(%rsp), %rdi
call _Z7cleanupPbR7Wheel_t9cudaError
movl %eax, %r14d
jmp .L60
.L94:
leaq .LC21(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
leaq 80(%rsp), %rsi
movl %ebx, %edx
movq 48(%rsp), %rdi
call _Z7cleanupPbR7Wheel_t9cudaError
movl %eax, %r14d
jmp .L60
.L95:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2345:
.size _Z13parallelSieveyyyRK7Wheel_ty, .-_Z13parallelSieveyyyRK7Wheel_ty
.section .rodata.str1.1
.LC25:
.string "parallelSieve() failed!"
.text
.globl _Z12algorithm4_1y
.type _Z12algorithm4_1y, @function
_Z12algorithm4_1y:
.LFB2344:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $72, %rsp
.cfi_def_cfa_offset 112
movq %rdi, %rbx
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movq %rdi, (%rsp)
fildq (%rsp)
fld %st(0)
fstpt (%rsp)
testq %rdi, %rdi
js .L127
fstp %st(0)
.L97:
fldt (%rsp)
fldz
fucomip %st(1), %st
ja .L128
fstp %st(0)
.L98:
movq %rbx, %rdi
call _Znam@PLT
movq %rax, %r12
movq %rax, 32(%rsp)
movq %rbx, %rax
shrq $60, %rax
jne .L99
leaq 0(,%rbx,8), %rdi
call _Znam@PLT
movq %rax, %rbp
movq %rax, 40(%rsp)
pushq 8(%rsp)
.cfi_def_cfa_offset 120
pushq 8(%rsp)
.cfi_def_cfa_offset 128
call log10l@PLT
fmuls .LC24(%rip)
leaq -16(%rsp), %rsp
.cfi_def_cfa_offset 144
fstpt (%rsp)
movq %rbx, %rdi
call _Z17EratosthenesSieveey
movq %rax, %r10
addq $32, %rsp
.cfi_def_cfa_offset 112
movl $1, %r9d
testq %rax, %rax
je .L101
movq S(%rip), %rdx
movl $0, %eax
movl $1, %r9d
jmp .L104
.L127:
fadds .LC0(%rip)
fstpt (%rsp)
jmp .L97
.L128:
leaq -16(%rsp), %rsp
.cfi_def_cfa_offset 128
fstpt (%rsp)
call sqrtl@PLT
fstp %st(0)
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L98
.L99:
movq 56(%rsp), %rax
subq %fs:40, %rax
je .L102
call __stack_chk_fail@PLT
.L102:
call __cxa_throw_bad_array_new_length@PLT
.L103:
addq $1, %rax
cmpq %rax, %r10
je .L101
.L104:
cmpb $0, (%rdx,%rax)
je .L103
imulq %rax, %r9
jmp .L103
.L101:
movl $0, %r8d
movq %r9, %r11
movl $0, %r13d
testq %rbx, %rbx
jne .L105
.L106:
fildl P(%rip)
fldt (%rsp)
fdivp %st, %st(1)
fnstcw 30(%rsp)
movzwl 30(%rsp), %eax
andb $-13, %ah
orb $8, %ah
movw %ax, 26(%rsp)
fldcw 26(%rsp)
frndint
fldcw 30(%rsp)
flds .LC2(%rip)
fxch %st(1)
fcomi %st(1), %st
fstp %st(1)
jnb .L112
movzwl 30(%rsp), %eax
orb $12, %ah
movw %ax, 28(%rsp)
fldcw 28(%rsp)
fistpq (%rsp)
fldcw 30(%rsp)
movq (%rsp), %r8
.L113:
leaq 32(%rsp), %rcx
movq %r9, %rdx
movq %r10, %rsi
movq %rbx, %rdi
call _Z13parallelSieveyyyRK7Wheel_ty
movl %eax, %ebx
cmpb $0, check_cuda_status(%rip)
je .L114
testl %eax, %eax
jne .L129
.L114:
movq %r12, %rdi
call _ZdaPv@PLT
movq %rbp, %rdi
call _ZdaPv@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L130
movl %ebx, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L118:
.cfi_restore_state
movq %rax, %rdi
.L110:
leaq (%rdi,%r8), %rax
testq %r9, %r9
je .L117
movq %r11, %rcx
.L109:
movq %rcx, %rsi
movl $0, %edx
divq %rcx
movq %rdx, %rcx
movq %rsi, %rax
testq %rdx, %rdx
jne .L109
.L108:
leaq 1(%rdi), %rax
cmpq $1, %rsi
jne .L118
movq %rdi, 0(%rbp,%r8,8)
addq $1, %r8
cmpq %r8, %rbx
je .L106
.L105:
movq %r11, %rcx
movq %r8, %rax
movq %r8, %rsi
testq %r9, %r9
je .L111
.L107:
movq %rcx, %rsi
movl $0, %edx
divq %rcx
movq %rdx, %rcx
movq %rsi, %rax
testq %rdx, %rdx
jne .L107
.L111:
cmpq $1, %rsi
sete (%r12,%r8)
movq %r13, %rdi
jmp .L110
.L117:
movq %rax, %rsi
jmp .L108
.L112:
fsubs .LC2(%rip)
fnstcw 30(%rsp)
movzwl 30(%rsp), %eax
orb $12, %ah
movw %ax, 28(%rsp)
fldcw 28(%rsp)
fistpq (%rsp)
fldcw 30(%rsp)
movq (%rsp), %r8
btcq $63, %r8
jmp .L113
.L129:
leaq .LC25(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L114
.L130:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2344:
.size _Z12algorithm4_1y, .-_Z12algorithm4_1y
.section .rodata.str1.1
.LC27:
.string "Find primes up to: %llu\n\n"
.LC28:
.string "Algorithm 4.1"
.section .rodata.str1.8
.align 8
.LC29:
.string "Algorithm 4.1 failed to execute!"
.section .rodata.str1.1
.LC30:
.string "%llu "
.text
.globl main
.type main, @function
main:
.LFB2341:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtoull@PLT
movq %rax, %rbp
movq %rax, %rdi
call _Znam@PLT
movq %rax, S(%rip)
movq %rbp, %rdx
leaq .LC27(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq myTimezone(%rip), %rsi
leaq startTime(%rip), %rdi
call gettimeofday@PLT
movq %rbp, %rdi
call _Z12algorithm4_1y
movl %eax, %ebx
leaq .LC28(%rip), %rdi
call _Z19report_running_timePKc
cmpb $0, check_cuda_status(%rip)
je .L140
testl %ebx, %ebx
jne .L132
.L140:
movl $0, %ebx
leaq .LC30(%rip), %r12
testq %rbp, %rbp
jne .L134
.L135:
movq S(%rip), %rdi
movl $0, %eax
testq %rdi, %rdi
je .L131
call _ZdaPv@PLT
movl $0, %eax
.L131:
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L132:
.cfi_restore_state
leaq .LC29(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L131
.L137:
addq $1, %rbx
cmpq %rbp, %rbx
je .L135
.L134:
movq S(%rip), %rax
cmpb $0, (%rax,%rbx)
je .L137
movl %ebx, %edx
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L137
.cfi_endproc
.LFE2341:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC31:
.string "_Z19parallelSieveKernelyyy7Wheel_tyPb"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2375:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC31(%rip), %rdx
movq %rdx, %rcx
leaq _Z19parallelSieveKernelyyy7Wheel_tyPb(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2375:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl endTime
.bss
.align 16
.type endTime, @object
.size endTime, 16
endTime:
.zero 16
.globl startTime
.align 16
.type startTime, @object
.size startTime, 16
startTime:
.zero 16
.globl myTimezone
.align 8
.type myTimezone, @object
.size myTimezone, 8
myTimezone:
.zero 8
.globl check_cuda_status
.type check_cuda_status, @object
.size check_cuda_status, 1
check_cuda_status:
.zero 1
.globl P
.align 4
.type P, @object
.size P, 4
P:
.zero 4
.globl S
.align 8
.type S, @object
.size S, 8
S:
.zero 8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1602224128
.align 4
.LC2:
.long 1593835520
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC6:
.long 0
.long 1093567616
.align 8
.LC15:
.long 0
.long 1127219200
.align 8
.LC17:
.long 0
.long 1072693248
.section .rodata.cst4
.align 4
.LC18:
.long 998244352
.section .rodata.cst8
.align 8
.LC23:
.long -1
.long 2147483647
.section .rodata.cst4
.align 4
.LC24:
.long 1048576000
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
/* SorensonPar.cu
Parallel Implementation of Algorithm 4.1
as discussed in Sorenson and Parberry's
1994 paper "Two Fast Parallel Prime Number
Sieves".
Authors:
Daniel Anzaldo
David Frank
Antonio Lanfranchi
*/
// Visual Studio Dependencies (Can be commented out)
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// C dependencies
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
// C++ dependencies
#include <algorithm>
typedef unsigned long long big;
// GLOBAL VARIABLES--------------------------------------
typedef struct Wheel_t // Struct-of-Arrays Wheel
{
bool * rp; // Numbers relatively prime to m
big * dist; // D s.t. x + d is the smallest integer >dist[x] relatively prime to m
} Wheel_k;
bool * S; // Global shared bit array of numbers up to N
int P; // Global number of processors
bool check_cuda_status = false; // turn to false when running on circe
/* These are for tracking time */
struct timezone myTimezone;
struct timeval startTime, endTime;
// HOST FUNCTION HEADERS---------------------------------
/* gcd
Host version of the Euclidean Method
*/
__host__ big gcd(big u, big v);
/* EratosthenesSieve
HELPER: for Algorithm 4.1 Sequential Portion
The most basic form of generating primes.
Used to help find the first k primes.
Returns the k-th prime.
*/
big EratosthenesSieve(long double x);
/* Algorithm 4.1 Sequential Portion
Running Time: O(sqrt(n))
Space: O(sqrt(n)) up to O(sqrt(n)/log log n)
*/
cudaError_t algorithm4_1(big n);
/* Algorithm 4.1 Helper: Parallel Sieve
All CUDA-related functionality goes here.
This code will change for different kernel versions.
*/
cudaError_t parallelSieve(
big n, big k, big m, const Wheel_k &wheel, big range);
/* Frees the memory allocated on the device and returns any errors*/
cudaError_t cleanup(bool *d_S, Wheel_k &wheel, cudaError_t cudaStatus);
/* Set a checkpoint and show the total running time in seconds */
double report_running_time(const char *arr);
// DEVICE MATH FUNCTIONS---------------------------------
/* gcd_d
Device version of the Euclidean Method
find number c such that: a = sc, b = tc
*/
/* gcd_d
   Device-side Euclidean algorithm: greatest common divisor of a and b.
   gcd_d(a, 0) == a by convention. */
__device__ big gcd_d(big a, big b)
{
    // Repeatedly replace (a, b) with (b, a mod b) until b reaches 0.
    while (b != 0)
    {
        big r = a % b;
        a = b;
        b = r;
    }
    return a;
}
/* gcd_d
Device version of the Binary Method
with bit arithmetic
*/
/*
__device__ big gcd_d(big u, big v)
{
big g = 1;
while ((u % 2 == 0) && (v % 2 == 0))
{
g <<= 1;
u >>= 1;
v >>= 1;
}
while (u != 0 && v != 0)
if (u % 2 == 0) u >>= 1;
else if (v % 2 == 0) v >>= 1;
else if (u > v) u = (u - v) >> 1;
else v = (v - u) >> 1;
return (g * (u + v));
}
*/
/* sqrt_d
Device version of the Square Root Function
Babylonian Method
*/
/* sqrt_d
   Device integer square root via a fixed ten Babylonian iterations.
   FIX: the original started with root = a/2, which is 0 for a < 2 and
   made the very first a/root a device division by zero; small inputs
   are now returned directly (sqrt(0)=0, sqrt(1)=1).
   NOTE(review): the update rounds through double (0.5 * ...), so values
   near 2^64 may lose low bits — presumably acceptable for its use as a
   loop bound; confirm if exactness ever matters. */
__device__ big sqrt_d(big a)
{
    if (a < 2) return a;  // also guards the a/root division below
    big root = a / 2;
    for (big n = 0; n < 10; n++)
    {
        root = 0.5 * (root + (a / root));
    }
    return root;
}
/* Device minimum of two big values. */
__device__ big min_d(big a, big b)
{
    if (a < b) return a;
    return b;
}
/* Device maximum of two big values. */
__device__ big max_d(big a, big b)
{
    if (a > b) return a;
    return b;
}
// ALGORITHM 4.1 KERNEL VERSIONS-------------------------
/* Algorithm 4.1: Parallel Sieve Kernel version 1
Parallelization: O(sqrt(n)) processors
Space: O(sqrt(n)) up to O(sqrt(n)/log log n)
PRAM Mode: Exclusive Read, Exclusive Write (EREW)
Remarks: No optimizations yet performed.
For n = 1 billion, it uses 31623 threads
*/
/* Algorithm 4.1: Parallel Sieve Kernel version 1
   Each thread i sieves the sub-range [range*i + 1, min(range*(i+1), n)]
   of the number field d_S, using the wheel tables (indexed mod m) to
   skip values not relatively prime to m.
   Preconditions: d_S, d_wheel.rp, d_wheel.dist are DEVICE pointers with
   at least n entries; m > 0.
   FIX: the multiple-removal store was written as
       if (!(d_S[q * f])) d_S[q * f] = false;
   which only writes false where the entry is already false — a no-op,
   so composite multiples were never cleared. The store is now
   unconditional (idempotent, so EREW-safe in effect). */
__global__ void parallelSieveKernel(
    big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S)
{
    big sqrt_N = sqrt_d(n);

    // Flat global thread index.
    big i = threadIdx.x + blockIdx.x * blockDim.x;

    // Threads beyond n will not do work.
    if (i <= n)
    {
        big L = range * i + 1;
        big R = min_d(range * (i + 1), n);

        /* Range sieving: seed each slot from the wheel's rp table. */
        for (big x = L; x < R; x++)
            d_S[x] = d_wheel.rp[x % m];

        /* For every prime from prime[k] up to sqrt(N) */
        for (big q = k; q < sqrt_N; q++)
        {
            if (d_S[q])
            {
                /* Compute smallest f s.t.
                   gcd_d(qf, m) == 1,
                   qf >= max_d(L, q^2) */
                big f = max_d(q - 1, (big)((L / q) - 1));

                /* f = f + W_k[f mod m].dist */
                f += d_wheel.dist[f % m];

                /* Remove the multiples of current prime */
                while ((q * f) <= R)
                {
                    d_S[q * f] = false;  // unconditional clear (was a no-op guard)
                    f += d_wheel.dist[f % m];
                }
            }
        }
    }
}
/* TODO: Algorithm 4.1: Parallel Sieve Kernel version 2
Remarks: Prime table S within [0, sqrt(n)] migrated to const memory
Wheel completely migrated to const memory
Beware that const memory is only 64kB.
Benchmark with the Profiler first before creating this!
*/
__global__ void parallelSieveKernel2(
big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S);
/* TODO: Algorithm 4.1: Parallel Sieve Kernel version 3
Remarks: Prime table S within [0, sqrt(n)] migrated to const memory
Wheel completely migrated to const memory
Probable use of the shared memory
Probable use of registers
Beware that register is only 4B or 32b.
Beware that const memory is only 64kB.
Benchmark with the Profiler first before creating this!
*/
__global__ void parallelSieveKernel3(
big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S);
/* MAIN
To run this add the ff. args:
1. N = the number up to which you're sieving
*/
/* MAIN
   Usage: argv[1] = N, the number up to which to sieve.
   NOTE(review): argv[1] is dereferenced without checking argc — running
   with no argument is undefined behavior; confirm callers always pass N. */
int main(int argc, char **argv)
{
    big N = (big)strtoull(argv[1], NULL, 10);
    S = new bool[N]; //(bool*)malloc(N * sizeof(bool));

    printf("Find primes up to: %llu\n\n", N);

    /* start counting time */
    gettimeofday(&startTime, &myTimezone);

    cudaError_t x = algorithm4_1(N);

    /* check the total running time */
    report_running_time("Algorithm 4.1");

    if (check_cuda_status)
    {
        if (x != cudaSuccess) {
            printf("Algorithm 4.1 failed to execute!");
            return 1;
        }
    }

    // Display the primes.
    // FIX: the loop index was `int`, which (a) overflows for N > 2^31 and
    // (b) is undefined behavior when passed to printf for "%llu", which
    // requires an unsigned long long argument.
    for (big i = 0; i < N; i++)
        if (S[i]) printf("%llu ", i);

    delete[] S;
    return 0;
}
// HOST FUNCTION DEFINITIONS-----------------------------
// Euclidean Method
/* Host-side Euclidean algorithm: greatest common divisor of u and v.
   gcd(u, 0) == u by convention. */
__host__ big gcd(big u, big v)
{
    // Replace (u, v) with (v, u mod v) until the remainder vanishes.
    while (v != 0)
    {
        big r = u % v;
        u = v;
        v = r;
    }
    return u;
}
// Binary Method
/*
__host__ big gcd(big u, big v)
{
big g = 1;
while ((u % 2 == 0) && (v % 2 == 0))
{
g <<= 1;
u >>= 1;
v >>= 1;
}
while (u != 0 && v != 0)
if (u % 2 == 0) u >>= 1;
else if (v % 2 == 0) v >>= 1;
else if (u > v) u = (u - v) >> 1;
else v = (v - u) >> 1;
return (g * (u + v));
}
*/
/* EratosthenesSieve
   Fills the global table S[0..n) with primality flags and then scans
   downward from k, so the returned value is the smallest prime p with
   2 < p <= k (0 if none).
   FIXES:
   - the inner striking index was `int`, which overflows once i*i
     exceeds 2^31 (n above ~2 billion);
   - the outer bound `i < (big)sqrtl(n)` excluded i == floor(sqrt(n)),
     so e.g. for n = 26 the composite 25 was never struck; the loop now
     runs while i*i < n, which is exact in integer arithmetic. */
big EratosthenesSieve(long double k, big n)
{
    big kthPrime = 0;

    // 0 and 1 are non-primes.
    S[0] = S[1] = false;
    for (big i = 2; i < n; i++)
        S[i] = true;

    // Simple sieving: strike multiples of every surviving i with i*i < n.
    for (big i = 2; i * i < n; i++)
        if (S[i])
        {
            for (big j = i * i; j < n; j += i)
                S[j] = false;
        }

    // Downward scan: the last assignment wins, i.e. the smallest prime > 2.
    for (big i = k; i > 2; i--)
        if (S[i]) kthPrime = i;

    return kthPrime;
}
/* Algorithm 4.1, sequential portion: builds the k-th wheel W_k on the
   host, then hands off to parallelSieve() for the device-side sieving.
   Returns whatever status parallelSieve() reports. */
cudaError_t algorithm4_1(big n)
{
    /* VARIABLES */
    big range;
    // NOTE(review): sqrt_N is computed but never used below — dead store?
    big sqrt_N = (big)sqrtl((long double)n);
    Wheel_k wheel;

    /* Allocation of wheel (host-side; n entries each, freed at the end) */
    wheel.rp = new bool[n];
    wheel.dist = new big[n];

    /* Find the first k primes
    K = maximal s.t. S[K] <= (log N) / 4
    Find primes up to sqrt(N) */
    big k = EratosthenesSieve(log10l((long double)n) / 4, n);

    /* Find the product of the first k primes m */
    // NOTE(review): this multiplies by the INDEX ii of each prime slot
    // below k; if k <= 2 no index qualifies and m stays 1 — confirm this
    // matches the paper's definition of m.
    big m = 1;
    for (big ii = 0; ii < k; ii++)
        if (S[ii]) m *= ii;

    /* Compute k-th wheel W_k
    FUTURE OPTIMIZATION: Delegate kernel for computation */
    for (big x = 0; x < n; x++)
    {
        // True if rp[x] is relatively prime to m
        wheel.rp[x] = (gcd(x, m) == 1);

        /* This is d s.t. x + d is
        the smallest integer >dist[x]
        relatively prime to m */
        // d is an int: assumes the gap to the next coprime residue fits
        // in int (true for small wheel periods) — TODO confirm.
        int d = 0;
        while (gcd(x + d, m) != 1)
            d++;
        wheel.dist[x] = d;
    }

    /* Delta = ceil(n/p) */
    // NOTE(review): P is never assigned anywhere in this file; if it is
    // still its zero initial value here, n/P is infinity and the cast to
    // big is undefined — verify P is set by the build/runtime elsewhere.
    range = (big)ceill(n / (long double)P);

    /* PARALLEL PART */
    cudaError_t parallelStatus = parallelSieve(n, k, m, wheel, range);
    if (check_cuda_status)
    {
        if (parallelStatus != cudaSuccess) {
            fprintf(stderr, "parallelSieve() failed!");
        }
    }

    /* FREE */
    delete[] wheel.rp;
    delete[] wheel.dist;

    return parallelStatus;
}
/* Algorithm 4.1 helper: all CUDA-side work.
   Allocates device copies of S and the wheel, copies them over, launches
   parallelSieveKernel, and copies the sieved table back into S.
   Error checks are gated on the global check_cuda_status flag; every
   failure path funnels through cleanup() so device memory is released.
   FIX: the kernel was launched with the HOST-side `wheel` struct, so the
   device dereferenced host pointers (silent illegal-address fault); it
   now receives `d_wheel`, whose rp/dist members are the device buffers
   allocated below. */
cudaError_t parallelSieve(
    big n, big k, big m, const Wheel_k &wheel, big range)
{
    cudaError_t cudaStatus;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    /* The Number Field S
       will be migrated to GLOBAL memory
       OPTIMIZATION: ranges will be migrated to SHARED memory
       OPTIMIZATION: [0, sqrt(n)] will be migrated to CONSTANT memory
    */
    bool * d_S = NULL;

    // The Wheel Precomputed Table: a struct of DEVICE pointers.
    // NULL-initialized so cleanup() is safe on early exits.
    Wheel_k d_wheel;
    d_wheel.rp = NULL;
    d_wheel.dist = NULL;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
            return cudaStatus;
        }
    }

    // Measure start time for CUDA portion
    cudaEventRecord(start, 0);

    // CUDA Memory Allocations.
    cudaStatus = cudaMalloc((void**)&d_S, n * sizeof(bool));
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed on number field S!\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    cudaStatus = cudaMalloc((void**)&(d_wheel.rp), n * sizeof(bool));
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed on wheel.rp!\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    cudaStatus = cudaMalloc((void**)&(d_wheel.dist), n * sizeof(big));
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed on wheel.dist!\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    // cudaMemCpy -> Device
    cudaStatus = cudaMemcpy(d_S, S, n * sizeof(bool), cudaMemcpyHostToDevice);
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed! S->d_S.\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    cudaStatus = cudaMemcpy(d_wheel.rp, wheel.rp, n * sizeof(bool), cudaMemcpyHostToDevice);
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed! wheel.rp->d_wheel.rp\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    cudaStatus = cudaMemcpy(d_wheel.dist, wheel.dist, n * sizeof(big), cudaMemcpyHostToDevice);
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed! wheel.dist->d_wheel.dist\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    // Kernel Call
    // NOTE(review): the grid covers ~sqrt(n)/256 blocks while the kernel
    // guards i <= n — confirm this matches the intended O(sqrt(n))
    // processor count of Algorithm 4.1.
    dim3 gridSize(ceill(ceill(sqrt(n))/256), 1, 1);
    dim3 blockSize(256, 1, 1);

    // FIX: pass the device-side wheel, not the host-side `wheel`.
    parallelSieveKernel<<<gridSize, blockSize>>>(n, k, m, d_wheel, range, d_S);

    cudaStatus = cudaGetLastError();
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "parallelSieveKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    cudaStatus = cudaDeviceSynchronize();
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    // cudaMemCpy -> Host
    cudaStatus = cudaMemcpy(S, d_S, n * sizeof(bool), cudaMemcpyDeviceToHost);
    if (check_cuda_status)
    {
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed! d_S->S.\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    // Measure stop time for CUDA portion
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Time to generate: %0.5f ms\n", elapsedTime);

    // cudaFree
    return cleanup(d_S, d_wheel, cudaStatus);
}
/* Releases all device allocations and passes the caller's status through
   unchanged, so callers can `return cleanup(...)` on any exit path.
   cudaFree on a NULL pointer is a documented no-op, so partially
   initialized wheels are safe here. */
cudaError_t cleanup(bool *d_S, Wheel_k &wheel, cudaError_t cudaStatus)
{
    cudaFree(d_S);
    cudaFree(wheel.rp);
    cudaFree(wheel.dist);

    return cudaStatus;
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
/* Marks a checkpoint and prints the wall-clock time elapsed since the
   global startTime, labelled with `arr`; returns the same interval in
   seconds as a double. */
double report_running_time(const char *arr) {
    gettimeofday(&endTime, &myTimezone);
    long sec_diff  = endTime.tv_sec  - startTime.tv_sec;
    long usec_diff = endTime.tv_usec - startTime.tv_usec;
    if (usec_diff < 0) {
        // Borrow a second so the microsecond part is non-negative.
        --sec_diff;
        usec_diff += 1000000;
    }
    printf("Running time for %s: %ld.%06ld sec\n\n", arr, sec_diff, usec_diff);
    return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
|
/* SorensonPar.cu
Parallel Implementation of Algorithm 4.1
as discussed in Sorenson and Parberry's
1994 paper "Two Fast Parallel Prime Number
Sieves".
Authors:
Daniel Anzaldo
David Frank
Antonio Lanfranchi
*/
// Visual Studio Dependencies (Can be commented out)
#include "hip/hip_runtime.h"
// C dependencies
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
// C++ dependencies
#include <algorithm>
typedef unsigned long long big;
// GLOBAL VARIABLES--------------------------------------
typedef struct Wheel_t // Struct-of-Arrays Wheel
{
bool * rp; // Numbers relatively prime to m
big * dist; // D s.t. x + d is the smallest integer >dist[x] relatively prime to m
} Wheel_k;
bool * S; // Global shared bit array of numbers up to N
int P; // Global number of processors
bool check_cuda_status = false; // turn to false when running on circe
/* These are for tracking time */
struct timezone myTimezone;
struct timeval startTime, endTime;
// HOST FUNCTION HEADERS---------------------------------
/* gcd
Host version of the Euclidean Method
*/
__host__ big gcd(big u, big v);
/* EratosthenesSieve
HELPER: for Algorithm 4.1 Sequential Portion
The most basic form of generating primes.
Used to help find the first k primes.
Returns the k-th prime.
*/
big EratosthenesSieve(long double x);
/* Algorithm 4.1 Sequential Portion
Running Time: O(sqrt(n))
Space: O(sqrt(n)) up to O(sqrt(n)/log log n)
*/
hipError_t algorithm4_1(big n);
/* Algorithm 4.1 Helper: Parallel Sieve
All CUDA-related functionality goes here.
This code will change for different kernel versions.
*/
hipError_t parallelSieve(
big n, big k, big m, const Wheel_k &wheel, big range);
/* Frees the memory allocated on the device and returns any errors*/
hipError_t cleanup(bool *d_S, Wheel_k &wheel, hipError_t cudaStatus);
/* Set a checkpoint and show the total running time in seconds */
double report_running_time(const char *arr);
// DEVICE MATH FUNCTIONS---------------------------------
/* gcd_d
Device version of the Euclidean Method
find number c such that: a = sc, b = tc
*/
/* gcd_d
   Device-side Euclidean algorithm: greatest common divisor of a and b.
   gcd_d(a, 0) == a by convention. */
__device__ big gcd_d(big a, big b)
{
    // Repeatedly replace (a, b) with (b, a mod b) until b reaches 0.
    while (b != 0)
    {
        big r = a % b;
        a = b;
        b = r;
    }
    return a;
}
/* gcd_d
Device version of the Binary Method
with bit arithmetic
*/
/*
__device__ big gcd_d(big u, big v)
{
big g = 1;
while ((u % 2 == 0) && (v % 2 == 0))
{
g <<= 1;
u >>= 1;
v >>= 1;
}
while (u != 0 && v != 0)
if (u % 2 == 0) u >>= 1;
else if (v % 2 == 0) v >>= 1;
else if (u > v) u = (u - v) >> 1;
else v = (v - u) >> 1;
return (g * (u + v));
}
*/
/* sqrt_d
Device version of the Square Root Function
Babylonian Method
*/
/* sqrt_d
   Device integer square root via a fixed ten Babylonian iterations.
   FIX: the original started with root = a/2, which is 0 for a < 2 and
   made the very first a/root a device division by zero; small inputs
   are now returned directly (sqrt(0)=0, sqrt(1)=1).
   NOTE(review): the update rounds through double (0.5 * ...), so values
   near 2^64 may lose low bits — presumably acceptable for its use as a
   loop bound; confirm if exactness ever matters. */
__device__ big sqrt_d(big a)
{
    if (a < 2) return a;  // also guards the a/root division below
    big root = a / 2;
    for (big n = 0; n < 10; n++)
    {
        root = 0.5 * (root + (a / root));
    }
    return root;
}
/* Device minimum of two big values. */
__device__ big min_d(big a, big b)
{
    if (a < b) return a;
    return b;
}
/* Device maximum of two big values. */
__device__ big max_d(big a, big b)
{
    if (a > b) return a;
    return b;
}
// ALGORITHM 4.1 KERNEL VERSIONS-------------------------
/* Algorithm 4.1: Parallel Sieve Kernel version 1
Parallelization: O(sqrt(n)) processors
Space: O(sqrt(n)) up to O(sqrt(n)/log log n)
PRAM Mode: Exclusive Read, Exclusive Write (EREW)
Remarks: No optimizations yet performed.
For n = 1 billion, it uses 31623 threads
*/
/* Algorithm 4.1: Parallel Sieve Kernel version 1 (HIP port)
   Each thread i sieves the sub-range [range*i + 1, min(range*(i+1), n)]
   of the number field d_S, using the wheel tables (indexed mod m) to
   skip values not relatively prime to m.
   Preconditions: d_S, d_wheel.rp, d_wheel.dist are DEVICE pointers with
   at least n entries; m > 0.
   FIX: the multiple-removal store was written as
       if (!(d_S[q * f])) d_S[q * f] = false;
   which only writes false where the entry is already false — a no-op,
   so composite multiples were never cleared. The store is now
   unconditional (idempotent, so EREW-safe in effect). */
__global__ void parallelSieveKernel(
    big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S)
{
    big sqrt_N = sqrt_d(n);

    // Flat global thread index.
    big i = threadIdx.x + blockIdx.x * blockDim.x;

    // Threads beyond n will not do work.
    if (i <= n)
    {
        big L = range * i + 1;
        big R = min_d(range * (i + 1), n);

        /* Range sieving: seed each slot from the wheel's rp table. */
        for (big x = L; x < R; x++)
            d_S[x] = d_wheel.rp[x % m];

        /* For every prime from prime[k] up to sqrt(N) */
        for (big q = k; q < sqrt_N; q++)
        {
            if (d_S[q])
            {
                /* Compute smallest f s.t.
                   gcd_d(qf, m) == 1,
                   qf >= max_d(L, q^2) */
                big f = max_d(q - 1, (big)((L / q) - 1));

                /* f = f + W_k[f mod m].dist */
                f += d_wheel.dist[f % m];

                /* Remove the multiples of current prime */
                while ((q * f) <= R)
                {
                    d_S[q * f] = false;  // unconditional clear (was a no-op guard)
                    f += d_wheel.dist[f % m];
                }
            }
        }
    }
}
/* TODO: Algorithm 4.1: Parallel Sieve Kernel version 2
Remarks: Prime table S within [0, sqrt(n)] migrated to const memory
Wheel completely migrated to const memory
Beware that const memory is only 64kB.
Benchmark with the Profiler first before creating this!
*/
__global__ void parallelSieveKernel2(
big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S);
/* TODO: Algorithm 4.1: Parallel Sieve Kernel version 3
Remarks: Prime table S within [0, sqrt(n)] migrated to const memory
Wheel completely migrated to const memory
Probable use of the shared memory
Probable use of registers
Beware that register is only 4B or 32b.
Beware that const memory is only 64kB.
Benchmark with the Profiler first before creating this!
*/
__global__ void parallelSieveKernel3(
big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S);
/* MAIN
To run this add the ff. args:
1. N = the number up to which you're sieving
*/
int main(int argc, char **argv)
{
big N = (big)strtoull(argv[1], NULL, 10);
S = new bool[N]; //(bool*)malloc(N * sizeof(bool));
printf("Find primes up to: %llu\n\n", N);
/* start counting time */
gettimeofday(&startTime, &myTimezone);
hipError_t x = algorithm4_1(N);
/* check the total running time */
report_running_time("Algorithm 4.1");
if (check_cuda_status)
{
if (x != hipSuccess) {
printf("Algorithm 4.1 failed to execute!");
return 1;
}
}
// Display the primes.
for (int i = 0; i < N; i++)
if (S[i]) printf("%llu ", i);
delete[] S;
return 0;
}
// HOST FUNCTION DEFINITIONS-----------------------------
// Euclidean Method
/* Host-side Euclidean algorithm: greatest common divisor of u and v.
   gcd(u, 0) == u by convention. */
__host__ big gcd(big u, big v)
{
    // Replace (u, v) with (v, u mod v) until the remainder vanishes.
    while (v != 0)
    {
        big r = u % v;
        u = v;
        v = r;
    }
    return u;
}
// Binary Method
/*
__host__ big gcd(big u, big v)
{
big g = 1;
while ((u % 2 == 0) && (v % 2 == 0))
{
g <<= 1;
u >>= 1;
v >>= 1;
}
while (u != 0 && v != 0)
if (u % 2 == 0) u >>= 1;
else if (v % 2 == 0) v >>= 1;
else if (u > v) u = (u - v) >> 1;
else v = (v - u) >> 1;
return (g * (u + v));
}
*/
/* EratosthenesSieve (HIP port)
   Fills the global table S[0..n) with primality flags and then scans
   downward from k, so the returned value is the smallest prime p with
   2 < p <= k (0 if none).
   FIXES:
   - the inner striking index was `int`, which overflows once i*i
     exceeds 2^31 (n above ~2 billion);
   - the outer bound `i < (big)sqrtl(n)` excluded i == floor(sqrt(n)),
     so e.g. for n = 26 the composite 25 was never struck; the loop now
     runs while i*i < n, which is exact in integer arithmetic. */
big EratosthenesSieve(long double k, big n)
{
    big kthPrime = 0;

    // 0 and 1 are non-primes.
    S[0] = S[1] = false;
    for (big i = 2; i < n; i++)
        S[i] = true;

    // Simple sieving: strike multiples of every surviving i with i*i < n.
    for (big i = 2; i * i < n; i++)
        if (S[i])
        {
            for (big j = i * i; j < n; j += i)
                S[j] = false;
        }

    // Downward scan: the last assignment wins, i.e. the smallest prime > 2.
    for (big i = k; i > 2; i--)
        if (S[i]) kthPrime = i;

    return kthPrime;
}
/* Algorithm 4.1, sequential portion (HIP port): builds the k-th wheel
   W_k on the host, then hands off to parallelSieve() for the
   device-side sieving. Returns whatever status parallelSieve reports. */
hipError_t algorithm4_1(big n)
{
    /* VARIABLES */
    big range;
    // NOTE(review): sqrt_N is computed but never used below — dead store?
    big sqrt_N = (big)sqrtl((long double)n);
    Wheel_k wheel;

    /* Allocation of wheel (host-side; n entries each, freed at the end) */
    wheel.rp = new bool[n];
    wheel.dist = new big[n];

    /* Find the first k primes
    K = maximal s.t. S[K] <= (log N) / 4
    Find primes up to sqrt(N) */
    big k = EratosthenesSieve(log10l((long double)n) / 4, n);

    /* Find the product of the first k primes m */
    // NOTE(review): this multiplies by the INDEX ii of each prime slot
    // below k; if k <= 2 no index qualifies and m stays 1 — confirm this
    // matches the paper's definition of m.
    big m = 1;
    for (big ii = 0; ii < k; ii++)
        if (S[ii]) m *= ii;

    /* Compute k-th wheel W_k
    FUTURE OPTIMIZATION: Delegate kernel for computation */
    for (big x = 0; x < n; x++)
    {
        // True if rp[x] is relatively prime to m
        wheel.rp[x] = (gcd(x, m) == 1);

        /* This is d s.t. x + d is
        the smallest integer >dist[x]
        relatively prime to m */
        // d is an int: assumes the gap to the next coprime residue fits
        // in int (true for small wheel periods) — TODO confirm.
        int d = 0;
        while (gcd(x + d, m) != 1)
            d++;
        wheel.dist[x] = d;
    }

    /* Delta = ceil(n/p) */
    // NOTE(review): P is never assigned anywhere in this file; if it is
    // still its zero initial value here, n/P is infinity and the cast to
    // big is undefined — verify P is set by the build/runtime elsewhere.
    range = (big)ceill(n / (long double)P);

    /* PARALLEL PART */
    hipError_t parallelStatus = parallelSieve(n, k, m, wheel, range);
    if (check_cuda_status)
    {
        if (parallelStatus != hipSuccess) {
            fprintf(stderr, "parallelSieve() failed!");
        }
    }

    /* FREE */
    delete[] wheel.rp;
    delete[] wheel.dist;

    return parallelStatus;
}
/* Algorithm 4.1 helper: all HIP-side work.
   Allocates device copies of S and the wheel, copies them over, launches
   parallelSieveKernel, and copies the sieved table back into S.
   Error checks are gated on the global check_cuda_status flag; every
   failure path funnels through cleanup() so device memory is released.
   FIX: the kernel was launched with the HOST-side `wheel` struct, so the
   device dereferenced host pointers (silent illegal-address fault); it
   now receives `d_wheel`, whose rp/dist members are the device buffers
   allocated below. */
hipError_t parallelSieve(
    big n, big k, big m, const Wheel_k &wheel, big range)
{
    hipError_t cudaStatus;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    /* The Number Field S
       will be migrated to GLOBAL memory
       OPTIMIZATION: ranges will be migrated to SHARED memory
       OPTIMIZATION: [0, sqrt(n)] will be migrated to CONSTANT memory
    */
    bool * d_S = NULL;

    // The Wheel Precomputed Table: a struct of DEVICE pointers.
    // NULL-initialized so cleanup() is safe on early exits.
    Wheel_k d_wheel;
    d_wheel.rp = NULL;
    d_wheel.dist = NULL;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = hipSetDevice(0);
    if (check_cuda_status)
    {
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
            return cudaStatus;
        }
    }

    // Measure start time for CUDA portion
    hipEventRecord(start, 0);

    // CUDA Memory Allocations.
    cudaStatus = hipMalloc((void**)&d_S, n * sizeof(bool));
    if (check_cuda_status)
    {
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipMalloc failed on number field S!\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    cudaStatus = hipMalloc((void**)&(d_wheel.rp), n * sizeof(bool));
    if (check_cuda_status)
    {
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipMalloc failed on wheel.rp!\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    cudaStatus = hipMalloc((void**)&(d_wheel.dist), n * sizeof(big));
    if (check_cuda_status)
    {
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipMalloc failed on wheel.dist!\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    // cudaMemCpy -> Device
    cudaStatus = hipMemcpy(d_S, S, n * sizeof(bool), hipMemcpyHostToDevice);
    if (check_cuda_status)
    {
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipMemcpy failed! S->d_S.\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    cudaStatus = hipMemcpy(d_wheel.rp, wheel.rp, n * sizeof(bool), hipMemcpyHostToDevice);
    if (check_cuda_status)
    {
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipMemcpy failed! wheel.rp->d_wheel.rp\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    cudaStatus = hipMemcpy(d_wheel.dist, wheel.dist, n * sizeof(big), hipMemcpyHostToDevice);
    if (check_cuda_status)
    {
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipMemcpy failed! wheel.dist->d_wheel.dist\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    // Kernel Call
    // NOTE(review): the grid covers ~sqrt(n)/256 blocks while the kernel
    // guards i <= n — confirm this matches the intended O(sqrt(n))
    // processor count of Algorithm 4.1.
    dim3 gridSize(ceill(ceill(sqrt(n))/256), 1, 1);
    dim3 blockSize(256, 1, 1);

    // FIX: pass the device-side wheel, not the host-side `wheel`.
    parallelSieveKernel<<<gridSize, blockSize>>>(n, k, m, d_wheel, range, d_S);

    cudaStatus = hipGetLastError();
    if (check_cuda_status)
    {
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "parallelSieveKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    cudaStatus = hipDeviceSynchronize();
    if (check_cuda_status)
    {
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    // cudaMemCpy -> Host
    cudaStatus = hipMemcpy(S, d_S, n * sizeof(bool), hipMemcpyDeviceToHost);
    if (check_cuda_status)
    {
        if (cudaStatus != hipSuccess) {
            fprintf(stderr, "hipMemcpy failed! d_S->S.\n");
            return cleanup(d_S, d_wheel, cudaStatus);
        }
    }

    // Measure stop time for CUDA portion
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);

    float elapsedTime;
    hipEventElapsedTime(&elapsedTime, start, stop);
    printf("Time to generate: %0.5f ms\n", elapsedTime);

    // cudaFree
    return cleanup(d_S, d_wheel, cudaStatus);
}
/* Releases all device allocations and passes the caller's status through
   unchanged, so callers can `return cleanup(...)` on any exit path.
   hipFree on a NULL pointer is a documented no-op, so partially
   initialized wheels are safe here. */
hipError_t cleanup(bool *d_S, Wheel_k &wheel, hipError_t cudaStatus)
{
    hipFree(d_S);
    hipFree(wheel.rp);
    hipFree(wheel.dist);

    return cudaStatus;
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
/* Marks a checkpoint and prints the wall-clock time elapsed since the
   global startTime, labelled with `arr`; returns the same interval in
   seconds as a double. */
double report_running_time(const char *arr) {
    gettimeofday(&endTime, &myTimezone);
    long sec_diff  = endTime.tv_sec  - startTime.tv_sec;
    long usec_diff = endTime.tv_usec - startTime.tv_usec;
    if (usec_diff < 0) {
        // Borrow a second so the microsecond part is non-negative.
        --sec_diff;
        usec_diff += 1000000;
    }
    printf("Running time for %s: %ld.%06ld sec\n\n", arr, sec_diff, usec_diff);
    return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
/* SorensonPar.cu
Parallel Implementation of Algorithm 4.1
as discussed in Sorenson and Parberry's
1994 paper "Two Fast Parallel Prime Number
Sieves".
Authors:
Daniel Anzaldo
David Frank
Antonio Lanfranchi
*/
// Visual Studio Dependencies (Can be commented out)
#include "hip/hip_runtime.h"
// C dependencies
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
// C++ dependencies
#include <algorithm>
typedef unsigned long long big;
// GLOBAL VARIABLES--------------------------------------
typedef struct Wheel_t // Struct-of-Arrays Wheel
{
bool * rp; // Numbers relatively prime to m
big * dist; // D s.t. x + d is the smallest integer >dist[x] relatively prime to m
} Wheel_k;
bool * S; // Global shared bit array of numbers up to N
int P; // Global number of processors
bool check_cuda_status = false; // turn to false when running on circe
/* These are for tracking time */
struct timezone myTimezone;
struct timeval startTime, endTime;
// HOST FUNCTION HEADERS---------------------------------
/* gcd
Host version of the Euclidean Method
*/
__host__ big gcd(big u, big v);
/* EratosthenesSieve
HELPER: for Algorithm 4.1 Sequential Portion
The most basic form of generating primes.
Used to help find the first k primes.
Returns the k-th prime.
*/
big EratosthenesSieve(long double x);
/* Algorithm 4.1 Sequential Portion
Running Time: O(sqrt(n))
Space: O(sqrt(n)) up to O(sqrt(n)/log log n)
*/
hipError_t algorithm4_1(big n);
/* Algorithm 4.1 Helper: Parallel Sieve
All CUDA-related functionality goes here.
This code will change for different kernel versions.
*/
hipError_t parallelSieve(
big n, big k, big m, const Wheel_k &wheel, big range);
/* Frees the memory allocated on the device and returns any errors*/
hipError_t cleanup(bool *d_S, Wheel_k &wheel, hipError_t cudaStatus);
/* Set a checkpoint and show the total running time in seconds */
double report_running_time(const char *arr);
// DEVICE MATH FUNCTIONS---------------------------------
/* gcd_d
Device version of the Euclidean Method
find number c such that: a = sc, b = tc
*/
/* gcd_d
   Device-side Euclidean algorithm: greatest common divisor of a and b.
   Iterates (a, b) -> (b, a mod b) until b reaches zero. */
__device__ big gcd_d(big a, big b)
{
	while (b != 0)
	{
		big r = a % b;
		a = b;
		b = r;
	}
	return a;
}
/* gcd_d
Device version of the Binary Method
with bit arithmetic
*/
/*
__device__ big gcd_d(big u, big v)
{
big g = 1;
while ((u % 2 == 0) && (v % 2 == 0))
{
g <<= 1;
u >>= 1;
v >>= 1;
}
while (u != 0 && v != 0)
if (u % 2 == 0) u >>= 1;
else if (v % 2 == 0) v >>= 1;
else if (u > v) u = (u - v) >> 1;
else v = (v - u) >> 1;
return (g * (u + v));
}
*/
/* sqrt_d
Device version of the Square Root Function
Babylonian Method
*/
/* sqrt_d
   Device-side integer square root (Babylonian / Newton iteration).
   Returns floor(sqrt(a)).
   Fixes two defects in the original:
   - a < 2 made the initial guess a/2 == 0, so a/root divided by zero;
   - the update 0.5 * (root + a/root) went through double, losing
     precision for 64-bit inputs above 2^53, and a fixed 10 iterations
     was not guaranteed to converge from a/2.
   This version is pure integer arithmetic and iterates until the
   monotone descent stops, which lands exactly on floor(sqrt(a)). */
__device__ big sqrt_d(big a)
{
	if (a < 2) return a;          // floor(sqrt(0)) == 0, floor(sqrt(1)) == 1
	big x0 = a / 2 + 1;           // initial guess >= sqrt(a); cannot overflow
	big x1 = (x0 + a / x0) / 2;
	while (x1 < x0)               // strictly decreasing until the floor is hit
	{
		x0 = x1;
		x1 = (x0 + a / x0) / 2;
	}
	return x0;
}
/* min_d: the smaller of two unsigned 64-bit values. */
__device__ big min_d(big a, big b)
{
	if (b < a)
		return b;
	return a;
}
/* max_d: the larger of two unsigned 64-bit values. */
__device__ big max_d(big a, big b)
{
	if (b > a)
		return b;
	return a;
}
// ALGORITHM 4.1 KERNEL VERSIONS-------------------------
/* Algorithm 4.1: Parallel Sieve Kernel version 1
Parallelization: O(sqrt(n)) processors
Space: O(sqrt(n)) up to O(sqrt(n)/log log n)
PRAM Mode: Exclusive Read, Exclusive Write (EREW)
Remarks: No optimizations yet performed.
For n = 1 billion, it uses 31623 threads
*/
/* Algorithm 4.1: Parallel Sieve Kernel version 1
   Parallelization: O(sqrt(n)) processors; EREW PRAM model.
   Each thread owns one contiguous block of `range` numbers and sieves
   it independently, so writes never collide across threads.
   FIX: the multiple-removal step was
       if (!(d_S[q * f])) d_S[q * f] = false;
   which only stores false into cells that are ALREADY false -- a no-op
   that left every composite q*f still marked prime. The store must be
   unconditional (the threads' ranges are disjoint, so EREW holds). */
__global__ void parallelSieveKernel(
	big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S)
{
	big sqrt_N = sqrt_d(n);

	// Flat global thread id: one thread per range of `range` numbers.
	big i = threadIdx.x + blockIdx.x * blockDim.x;

	// Threads beyond n will not do work.
	if (i <= n)
	{
		big L = range * i + 1;
		big R = min_d(range * (i + 1), n);

		/* Range Sieving: seed this thread's slice of S from the wheel's
		   relative-primality mask (period m). */
		for (big x = L; x < R; x++)
			d_S[x] = d_wheel.rp[x % m];

		/* For every prime from prime[k] up to sqrt(N) */
		for (big q = k; q < sqrt_N; q++)
		{
			if (d_S[q])
			{
				/* Compute smallest f s.t.
				   gcd_d(qf, m) == 1,
				   qf >= max_d(L, q^2) */
				big f = max_d(q - 1, (big)((L / q) - 1));

				/* f = f + W_k[f mod m].dist: advance to the next
				   multiplier coprime to m. */
				f += d_wheel.dist[f % m];

				/* Remove the multiples of the current prime. */
				while ((q * f) <= R)
				{
					d_S[q * f] = false;  // was a guarded no-op; see header
					f += d_wheel.dist[f % m];
				}
			}
		}
	}
}
/* TODO: Algorithm 4.1: Parallel Sieve Kernel version 2
Remarks: Prime table S within [0, sqrt(n)] migrated to const memory
Wheel completely migrated to const memory
Beware that const memory is only 64kB.
Benchmark with the Profiler first before creating this!
*/
__global__ void parallelSieveKernel2(
big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S);
/* TODO: Algorithm 4.1: Parallel Sieve Kernel version 3
Remarks: Prime table S within [0, sqrt(n)] migrated to const memory
Wheel completely migrated to const memory
Probable use of the shared memory
Probable use of registers
Beware that register is only 4B or 32b.
Beware that const memory is only 64kB.
Benchmark with the Profiler first before creating this!
*/
__global__ void parallelSieveKernel3(
big n, big k, big m, Wheel_k d_wheel, big range, bool *d_S);
/* MAIN
To run this add the ff. args:
1. N = the number up to which you're sieving
*/
int main(int argc, char **argv)
{
big N = (big)strtoull(argv[1], NULL, 10);
S = new bool[N]; //(bool*)malloc(N * sizeof(bool));
printf("Find primes up to: %llu\n\n", N);
/* start counting time */
gettimeofday(&startTime, &myTimezone);
hipError_t x = algorithm4_1(N);
/* check the total running time */
report_running_time("Algorithm 4.1");
if (check_cuda_status)
{
if (x != hipSuccess) {
printf("Algorithm 4.1 failed to execute!");
return 1;
}
}
// Display the primes.
for (int i = 0; i < N; i++)
if (S[i]) printf("%llu ", i);
delete[] S;
return 0;
}
// HOST FUNCTION DEFINITIONS-----------------------------
// Euclidean Method
// Euclidean Method: host-side greatest common divisor of u and v.
// Repeatedly maps (u, v) -> (v, u mod v); u holds the answer when v hits 0.
__host__ big gcd(big u, big v)
{
	while (v != 0)
	{
		big r = u % v;
		u = v;
		v = r;
	}
	return u;
}
// Binary Method
/*
__host__ big gcd(big u, big v)
{
big g = 1;
while ((u % 2 == 0) && (v % 2 == 0))
{
g <<= 1;
u >>= 1;
v >>= 1;
}
while (u != 0 && v != 0)
if (u % 2 == 0) u >>= 1;
else if (v % 2 == 0) v >>= 1;
else if (u > v) u = (u - v) >> 1;
else v = (v - u) >> 1;
return (g * (u + v));
}
*/
/* EratosthenesSieve
   HELPER for Algorithm 4.1's sequential portion.
   Marks primality of [0, n) in the global array S, then scans downward
   from k so the return value ends as the smallest prime in (2, k].
   k: (truncated) upper index for the returned prime.
   n: sieve size; S must have at least n entries.
   FIX: the inner marking index was declared `int`, so `j = i*i`
   truncated/overflowed once n exceeded INT_MAX; it is now `big`. */
big EratosthenesSieve(long double k, big n)
{
	big kthPrime = 0;

	// 0 and 1 are non-primes.
	S[0] = S[1] = false;
	for (big i = 2; i < n; i++)
		S[i] = true;

	// Simple Sieving Operation: clear multiples of each surviving i.
	for (big i = 2; i < (big)sqrtl(n); i++)
		if (S[i])
		{
			// `big j` avoids the signed-int truncation of the original.
			for (big j = i * i; j < n; j += i)
				S[j] = false;
		}

	// Downward scan: each hit overwrites, so the last (smallest) wins.
	for (big i = k; i > 2; i--)
		if (S[i]) kthPrime = i;

	return kthPrime;
}
/* Algorithm 4.1 Sequential Portion
   Running Time: O(sqrt(n)); Space: O(sqrt(n)) up to O(sqrt(n)/log log n).
   Builds the k-th wheel W_k on the host, then delegates the sieve to
   parallelSieve(). Returns the status of the parallel phase.
   NOTE(review): sqrt_N is computed but never used below -- confirm it
   can be removed. */
hipError_t algorithm4_1(big n)
{
/* VARIABLES */
big range;
big sqrt_N = (big)sqrtl((long double)n);
Wheel_k wheel;
/* Allocation of wheel */
// Both tables are sized n, though the kernel only ever indexes them
// modulo m (d_wheel.rp[x % m], d_wheel.dist[f % m]).
wheel.rp = new bool[n];
wheel.dist = new big[n];
/* Find the first k primes
K = maximal s.t. S[K] <= (log N) / 4
Find primes up to sqrt(N) */
big k = EratosthenesSieve(log10l((long double)n) / 4, n);
/* Find the product of the first k primes m */
// NOTE(review): this multiplies every prime index strictly below k,
// which only matches "product of the first k primes" if k bounds them;
// if k <= 2, m stays 1 and every x % m is 0 -- confirm intended.
big m = 1;
for (big ii = 0; ii < k; ii++)
if (S[ii]) m *= ii;
/* Compute k-th wheel W_k
FUTURE OPTIMIZATION: Delegate kernel for computation */
for (big x = 0; x < n; x++)
{
// True if rp[x] is relatively prime to m
wheel.rp[x] = (gcd(x, m) == 1);
/* This is d s.t. x + d is
the smallest integer >dist[x]
relatively prime to m */
int d = 0;
while (gcd(x + d, m) != 1)
d++;
wheel.dist[x] = d;
}
/* Delta = ceil(n/p) */
// NOTE(review): P is a file-scope global that no visible code assigns
// (it zero-initializes), which would divide by zero here -- confirm P
// is set elsewhere before this runs.
range = (big)ceill(n / (long double)P);
/* PARALLEL PART */
hipError_t parallelStatus = parallelSieve(n, k, m, wheel, range);
if (check_cuda_status)
{
if (parallelStatus != hipSuccess) {
fprintf(stderr, "parallelSieve() failed!");
}
}
/* FREE */
delete[] wheel.rp;
delete[] wheel.dist;
return parallelStatus;
}
/* Algorithm 4.1 Helper: Parallel Sieve
   All HIP-related functionality: device allocation, host->device
   transfers, kernel launch, device->host copy-back, and event timing.
   Launch shape: 256 threads/block, ceil(ceil(sqrt(n))/256) blocks --
   O(sqrt(n)) threads, each sieving one `range`-sized slice.
   FIX: the kernel was launched with the HOST-side `wheel` argument,
   whose rp/dist members point into host memory; dereferencing them on
   the device is an illegal access. The launch now passes `d_wheel`,
   the device-side copy populated just above. */
hipError_t parallelSieve(
	big n, big k, big m, const Wheel_k &wheel, big range)
{
	hipError_t cudaStatus;
	hipEvent_t start, stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);

	/* The Number Field S, migrated to device global memory. */
	bool * d_S = NULL;

	/* The Wheel Precomputed Table, device-side copy.
	   NULL members keep cleanup() safe on early exits. */
	Wheel_k d_wheel;
	d_wheel.rp = NULL;
	d_wheel.dist = NULL;

	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = hipSetDevice(0);
	if (check_cuda_status)
	{
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
			return cudaStatus;
		}
	}

	// Measure start time for the device portion.
	hipEventRecord(start, 0);

	// Device memory allocations.
	cudaStatus = hipMalloc((void**)&d_S, n * sizeof(bool));
	if (check_cuda_status)
	{
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "hipMalloc failed on number field S!\n");
			return cleanup(d_S, d_wheel, cudaStatus);
		}
	}
	cudaStatus = hipMalloc((void**)&(d_wheel.rp), n * sizeof(bool));
	if (check_cuda_status)
	{
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "hipMalloc failed on wheel.rp!\n");
			return cleanup(d_S, d_wheel, cudaStatus);
		}
	}
	cudaStatus = hipMalloc((void**)&(d_wheel.dist), n * sizeof(big));
	if (check_cuda_status)
	{
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "hipMalloc failed on wheel.dist!\n");
			return cleanup(d_S, d_wheel, cudaStatus);
		}
	}

	// Host -> device copies.
	cudaStatus = hipMemcpy(d_S, S, n * sizeof(bool), hipMemcpyHostToDevice);
	if (check_cuda_status)
	{
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "hipMemcpy failed! S->d_S.\n");
			return cleanup(d_S, d_wheel, cudaStatus);
		}
	}
	cudaStatus = hipMemcpy(d_wheel.rp, wheel.rp, n * sizeof(bool), hipMemcpyHostToDevice);
	if (check_cuda_status)
	{
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "hipMemcpy failed! wheel.rp->d_wheel.rp\n");
			return cleanup(d_S, d_wheel, cudaStatus);
		}
	}
	cudaStatus = hipMemcpy(d_wheel.dist, wheel.dist, n * sizeof(big), hipMemcpyHostToDevice);
	if (check_cuda_status)
	{
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "hipMemcpy failed! wheel.dist->d_wheel.dist\n");
			return cleanup(d_S, d_wheel, cudaStatus);
		}
	}

	// Kernel call: O(sqrt(n)) threads in 256-wide blocks.
	dim3 gridSize(ceill(ceill(sqrt(n))/256), 1, 1);
	dim3 blockSize(256, 1, 1);
	// Pass the DEVICE wheel (the original passed host `wheel`).
	parallelSieveKernel<<<gridSize, blockSize>>>(n, k, m, d_wheel, range, d_S);

	cudaStatus = hipGetLastError();
	if (check_cuda_status)
	{
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "parallelSieveKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
			return cleanup(d_S, d_wheel, cudaStatus);
		}
	}
	cudaStatus = hipDeviceSynchronize();
	if (check_cuda_status)
	{
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
			return cleanup(d_S, d_wheel, cudaStatus);
		}
	}

	// Device -> host copy-back of the finished sieve.
	cudaStatus = hipMemcpy(S, d_S, n * sizeof(bool), hipMemcpyDeviceToHost);
	if (check_cuda_status)
	{
		if (cudaStatus != hipSuccess) {
			fprintf(stderr, "hipMemcpy failed! d_S->S.\n");
			return cleanup(d_S, d_wheel, cudaStatus);
		}
	}

	// Measure stop time for the device portion.
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	float elapsedTime;
	hipEventElapsedTime(&elapsedTime, start, stop);
	printf("Time to generate: %0.5f ms\n", elapsedTime);

	// Free device memory (events are leaked, as in the original;
	// adding hipEventDestroy on every exit path is a separate change).
	return cleanup(d_S, d_wheel, cudaStatus);
}
/* Free all device allocations (hipFree(NULL) is a harmless no-op)
   and forward the caller's status code unchanged. */
hipError_t cleanup(bool *d_S, Wheel_k &wheel, hipError_t cudaStatus)
{
	hipFree(wheel.dist);
	hipFree(wheel.rp);
	hipFree(d_S);
	return cudaStatus;
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
// Prints and returns the seconds elapsed since the global startTime
// checkpoint; `arr` labels the printed line.
double report_running_time(const char *arr) {
	gettimeofday(&endTime, &myTimezone);
	long sec_diff = endTime.tv_sec - startTime.tv_sec;
	long usec_diff = endTime.tv_usec - startTime.tv_usec;
	// Normalize a negative microsecond delta by borrowing one second.
	if (usec_diff < 0) {
		usec_diff += 1000000;
		sec_diff -= 1;
	}
	printf("Running time for %s: %ld.%06ld sec\n\n", arr, sec_diff, usec_diff);
	return sec_diff * 1.0 + usec_diff / 1000000.0;
}
|
.text
.file "SorensonPar.hip"
.globl _Z34__device_stub__parallelSieveKernelyyy7Wheel_tyPb # -- Begin function _Z34__device_stub__parallelSieveKernelyyy7Wheel_tyPb
.p2align 4, 0x90
.type _Z34__device_stub__parallelSieveKernelyyy7Wheel_tyPb,@function
_Z34__device_stub__parallelSieveKernelyyy7Wheel_tyPb: # @_Z34__device_stub__parallelSieveKernelyyy7Wheel_tyPb
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rcx, 80(%rsp)
movq %r8, 88(%rsp)
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movq %r9, 48(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 48(%rsp), %rax
movq %rax, 128(%rsp)
leaq 160(%rsp), %rax
movq %rax, 136(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z19parallelSieveKernelyyy7Wheel_tyPb, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z34__device_stub__parallelSieveKernelyyy7Wheel_tyPb, .Lfunc_end0-_Z34__device_stub__parallelSieveKernelyyy7Wheel_tyPb
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %rbp, -16
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtoull
movq %rax, %rbx
movq %rax, %rdi
callq _Znam
movq %rax, S(%rip)
movl $.L.str, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq printf
movl $startTime, %edi
movl $myTimezone, %esi
callq gettimeofday
movq %rbx, %rdi
callq _Z12algorithm4_1y
movl %eax, %ebp
movl $endTime, %edi
movl $myTimezone, %esi
callq gettimeofday
movq endTime(%rip), %rax
subq startTime(%rip), %rax
movq endTime+8(%rip), %rsi
subq startTime+8(%rip), %rsi
leaq 1000000(%rsi), %rcx
movq %rsi, %rdx
sarq $63, %rdx
addq %rax, %rdx
testq %rsi, %rsi
cmovnsq %rsi, %rcx
movl $.L.str.16, %edi
movl $.L.str.1, %esi
xorl %eax, %eax
callq printf
testl %ebp, %ebp
je .LBB1_2
# %bb.1:
cmpb $0, check_cuda_status(%rip)
je .LBB1_2
# %bb.10:
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl $1, %eax
jmp .LBB1_9
.LBB1_2: # %.preheader
testq %rbx, %rbx
je .LBB1_7
# %bb.3: # %.lr.ph.preheader
xorl %r14d, %r14d
jmp .LBB1_4
.p2align 4, 0x90
.LBB1_6: # in Loop: Header=BB1_4 Depth=1
incq %r14
cmpq %r14, %rbx
je .LBB1_7
.LBB1_4: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movq S(%rip), %rax
cmpb $0, (%rax,%r14)
je .LBB1_6
# %bb.5: # in Loop: Header=BB1_4 Depth=1
movl $.L.str.3, %edi
movl %r14d, %esi
xorl %eax, %eax
callq printf
jmp .LBB1_6
.LBB1_7: # %._crit_edge
movq S(%rip), %rdi
xorl %eax, %eax
testq %rdi, %rdi
je .LBB1_9
# %bb.8:
callq _ZdaPv
xorl %eax, %eax
.LBB1_9:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z12algorithm4_1y
.LCPI2_0:
.quad 6881500230622117888 # 0x5f80000000000000
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0
.LCPI2_1:
.long 0x3e800000 # float 0.25
.LCPI2_2:
.long 0x5f000000 # float 9.22337203E+18
.text
.globl _Z12algorithm4_1y
.p2align 4, 0x90
.type _Z12algorithm4_1y,@function
_Z12algorithm4_1y: # @_Z12algorithm4_1y
.cfi_startproc
# %bb.0: # %cdce.end
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdi, %r15
movq %rdi, 80(%rsp)
xorl %eax, %eax
testq %rdi, %rdi
sets %al
fildll 80(%rsp)
fadds .LCPI2_0(,%rax,4)
fstpt 32(%rsp) # 10-byte Folded Spill
callq _Znam
movq %rax, %rbx
movq %rax, 104(%rsp)
movq %r15, %rax
shrq $61, %rax
leaq (,%r15,8), %rcx
xorl %edi, %edi
negq %rax
sbbq %rdi, %rdi
orq %rcx, %rdi
callq _Znam
movq %rax, %r14
movq %rax, 112(%rsp)
fldt 32(%rsp) # 10-byte Folded Reload
fstpt (%rsp)
callq log10l
fmuls .LCPI2_1(%rip)
fstpt 92(%rsp) # 10-byte Folded Spill
movq S(%rip), %rdi
movw $0, (%rdi)
cmpq $3, %r15
jb .LBB2_2
# %bb.1: # %.lr.ph.preheader.i
addq $2, %rdi
leaq -2(%r15), %rdx
movl $1, %esi
callq memset@PLT
.LBB2_2: # %.preheader.i
fldt 32(%rsp) # 10-byte Folded Reload
fstpt (%rsp)
callq sqrtl
flds .LCPI2_2(%rip)
xorl %eax, %eax
fxch %st(1)
fucomi %st(1), %st
setae %al
fldz
fcmovnb %st(2), %st
fstp %st(2)
fsubp %st, %st(1)
fnstcw 22(%rsp)
movzwl 22(%rsp), %ecx
orl $3072, %ecx # imm = 0xC00
movw %cx, 30(%rsp)
fldcw 30(%rsp)
fistpll 72(%rsp)
fldcw 22(%rsp)
shlq $63, %rax
xorq 72(%rsp), %rax
cmpq $3, %rax
jb .LBB2_9
# %bb.3: # %.lr.ph32.i.preheader
movl $2, %r12d
jmp .LBB2_4
.p2align 4, 0x90
.LBB2_8: # %.loopexit.i
# in Loop: Header=BB2_4 Depth=1
incq %r12
fldt 32(%rsp) # 10-byte Folded Reload
fstpt (%rsp)
callq sqrtl
flds .LCPI2_2(%rip)
xorl %eax, %eax
fxch %st(1)
fucomi %st(1), %st
setae %al
fldz
fcmovnb %st(2), %st
fstp %st(2)
fsubp %st, %st(1)
fnstcw 18(%rsp)
movzwl 18(%rsp), %ecx
orl $3072, %ecx # imm = 0xC00
movw %cx, 26(%rsp)
fldcw 26(%rsp)
fistpll 64(%rsp)
fldcw 18(%rsp)
shlq $63, %rax
xorq 64(%rsp), %rax
cmpq %rax, %r12
jae .LBB2_9
.LBB2_4: # %.lr.ph32.i
# =>This Loop Header: Depth=1
# Child Loop BB2_6 Depth 2
movq S(%rip), %rax
cmpb $0, (%rax,%r12)
je .LBB2_8
# %bb.5: # in Loop: Header=BB2_4 Depth=1
movl %r12d, %ecx
imull %r12d, %ecx
movslq %ecx, %rcx
cmpq %r15, %rcx
jae .LBB2_8
.p2align 4, 0x90
.LBB2_6: # %.lr.ph29.i
# Parent Loop BB2_4 Depth=1
# => This Inner Loop Header: Depth=2
movb $0, (%rax,%rcx)
addl %r12d, %ecx
movslq %ecx, %rcx
cmpq %r15, %rcx
jb .LBB2_6
jmp .LBB2_8
.LBB2_9: # %._crit_edge.i
flds .LCPI2_2(%rip)
xorl %eax, %eax
fldt 92(%rsp) # 10-byte Folded Reload
fucomi %st(1), %st
setae %al
fldz
fcmovnb %st(2), %st
fstp %st(2)
fsubp %st, %st(1)
fnstcw 20(%rsp)
movzwl 20(%rsp), %ecx
orl $3072, %ecx # imm = 0xC00
movw %cx, 28(%rsp)
fldcw 28(%rsp)
fistpll 56(%rsp)
fldcw 20(%rsp)
shlq $63, %rax
xorq 56(%rsp), %rax
cmpq $3, %rax
jb .LBB2_10
# %bb.11: # %.lr.ph36.i
movq S(%rip), %rcx
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_12: # =>This Inner Loop Header: Depth=1
cmpb $0, (%rcx,%rax)
cmovneq %rax, %r12
decq %rax
cmpq $2, %rax
ja .LBB2_12
jmp .LBB2_13
.LBB2_10:
xorl %r12d, %r12d
.LBB2_13: # %_Z17EratosthenesSieveey.exit
movl $1, %r13d
testq %r12, %r12
je .LBB2_14
# %bb.22: # %.lr.ph
movq S(%rip), %rax
xorl %ecx, %ecx
jmp .LBB2_23
.p2align 4, 0x90
.LBB2_25: # in Loop: Header=BB2_23 Depth=1
imulq %rdx, %r13
incq %rcx
cmpq %rcx, %r12
je .LBB2_14
.LBB2_23: # =>This Inner Loop Header: Depth=1
cmpb $0, (%rax,%rcx)
movl $1, %edx
je .LBB2_25
# %bb.24: # in Loop: Header=BB2_23 Depth=1
movq %rcx, %rdx
jmp .LBB2_25
.LBB2_14: # %.preheader
testq %r15, %r15
je .LBB2_26
# %bb.15: # %.lr.ph52
xorl %ecx, %ecx
jmp .LBB2_16
.p2align 4, 0x90
.LBB2_33: # in Loop: Header=BB2_16 Depth=1
movq %rsi, (%r14,%rcx,8)
incq %rcx
cmpq %r15, %rcx
je .LBB2_26
.LBB2_16: # =>This Loop Header: Depth=1
# Child Loop BB2_18 Depth 2
# Child Loop BB2_20 Depth 2
# Child Loop BB2_31 Depth 3
movq %rcx, %rsi
testq %r13, %r13
je .LBB2_19
# %bb.17: # %.lr.ph.i.preheader
# in Loop: Header=BB2_16 Depth=1
movq %rcx, %rax
movq %r13, %rdx
.p2align 4, 0x90
.LBB2_18: # %.lr.ph.i
# Parent Loop BB2_16 Depth=1
# => This Inner Loop Header: Depth=2
movq %rdx, %rsi
xorl %edx, %edx
divq %rsi
movq %rsi, %rax
testq %rdx, %rdx
jne .LBB2_18
.LBB2_19: # %_Z3gcdyy.exit
# in Loop: Header=BB2_16 Depth=1
cmpq $1, %rsi
sete (%rbx,%rcx)
xorl %eax, %eax
jmp .LBB2_20
.p2align 4, 0x90
.LBB2_21: # in Loop: Header=BB2_20 Depth=2
movq %rax, %rdi
.LBB2_32: # %_Z3gcdyy.exit48
# in Loop: Header=BB2_20 Depth=2
leaq 1(%rsi), %rax
cmpq $1, %rdi
je .LBB2_33
.LBB2_20: # Parent Loop BB2_16 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_31 Depth 3
movq %rax, %rsi
addq %rcx, %rax
testq %r13, %r13
je .LBB2_21
# %bb.30: # %.lr.ph.i42.preheader
# in Loop: Header=BB2_20 Depth=2
movq %r13, %rdx
.p2align 4, 0x90
.LBB2_31: # %.lr.ph.i42
# Parent Loop BB2_16 Depth=1
# Parent Loop BB2_20 Depth=2
# => This Inner Loop Header: Depth=3
movq %rdx, %rdi
xorl %edx, %edx
divq %rdi
movq %rdi, %rax
testq %rdx, %rdx
jne .LBB2_31
jmp .LBB2_32
.LBB2_26: # %._crit_edge
movl P(%rip), %eax
movl %eax, 44(%rsp)
fldt 32(%rsp) # 10-byte Folded Reload
fidivl 44(%rsp)
fstpt (%rsp)
callq ceill@PLT
flds .LCPI2_2(%rip)
xorl %r8d, %r8d
fxch %st(1)
fucomi %st(1), %st
setae %r8b
fldz
fcmovnb %st(2), %st
fstp %st(2)
fsubp %st, %st(1)
fnstcw 16(%rsp)
movzwl 16(%rsp), %eax
orl $3072, %eax # imm = 0xC00
movw %ax, 24(%rsp)
fldcw 24(%rsp)
fistpll 48(%rsp)
fldcw 16(%rsp)
shlq $63, %r8
xorq 48(%rsp), %r8
leaq 104(%rsp), %rcx
movq %r15, %rdi
movq %r12, %rsi
movq %r13, %rdx
callq _Z13parallelSieveyyyRK7Wheel_ty
movl %eax, %ebp
testl %eax, %eax
je .LBB2_29
# %bb.27: # %._crit_edge
cmpb $0, check_cuda_status(%rip)
jne .LBB2_28
.LBB2_29:
movq %rbx, %rdi
callq _ZdaPv
movq %r14, %rdi
callq _ZdaPv
movl %ebp, %eax
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_28:
.cfi_def_cfa_offset 176
movq stderr(%rip), %rcx
movl $.L.str.4, %edi
movl $23, %esi
movl $1, %edx
callq fwrite@PLT
jmp .LBB2_29
.Lfunc_end2:
.size _Z12algorithm4_1y, .Lfunc_end2-_Z12algorithm4_1y
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z19report_running_timePKc
.LCPI3_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl _Z19report_running_timePKc
.p2align 4, 0x90
.type _Z19report_running_timePKc,@function
_Z19report_running_timePKc: # @_Z19report_running_timePKc
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl $endTime, %edi
movl $myTimezone, %esi
callq gettimeofday
movq endTime(%rip), %rax
subq startTime(%rip), %rax
movq endTime+8(%rip), %rcx
subq startTime+8(%rip), %rcx
leaq 1000000(%rcx), %r14
movq %rcx, %r15
sarq $63, %r15
addq %rax, %r15
testq %rcx, %rcx
cmovnsq %rcx, %r14
movl $.L.str.16, %edi
movq %rbx, %rsi
movq %r15, %rdx
movq %r14, %rcx
xorl %eax, %eax
callq printf
cvtsi2sd %r15, %xmm1
cvtsi2sd %r14, %xmm0
divsd .LCPI3_0(%rip), %xmm0
addsd %xmm1, %xmm0
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z19report_running_timePKc, .Lfunc_end3-_Z19report_running_timePKc
.cfi_endproc
# -- End function
.globl _Z3gcdyy # -- Begin function _Z3gcdyy
.p2align 4, 0x90
.type _Z3gcdyy,@function
# Host gcd(u, v), Euclidean remainder loop.
# Args: u in %rdi, v in %rsi. Result in %rax.
_Z3gcdyy: # @_Z3gcdyy
.cfi_startproc
# %bb.0:
movq %rdi, %rax
# v == 0: gcd is u, already in %rax.
testq %rsi, %rsi
je .LBB4_4
# %bb.1: # %.lr.ph.preheader
movq %rsi, %rdx
.p2align 4, 0x90
.LBB4_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
# (u, v) <- (v, u mod v): divq divides %rdx:%rax, remainder lands in %rdx.
movq %rdx, %rcx
xorl %edx, %edx
divq %rcx
movq %rcx, %rax
testq %rdx, %rdx
jne .LBB4_2
# %bb.3: # %._crit_edge
movq %rcx, %rax
.LBB4_4:
retq
.Lfunc_end4:
.size _Z3gcdyy, .Lfunc_end4-_Z3gcdyy
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z17EratosthenesSieveey
.LCPI5_0:
.quad 6881500230622117888 # 0x5f80000000000000
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0
.LCPI5_1:
.long 0x5f000000 # float 9.22337203E+18
.text
.globl _Z17EratosthenesSieveey
.p2align 4, 0x90
.type _Z17EratosthenesSieveey,@function
_Z17EratosthenesSieveey: # @_Z17EratosthenesSieveey
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $88, %rsp
.cfi_def_cfa_offset 112
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq %rdi, %rbx
fldt 112(%rsp)
fstpt 64(%rsp) # 10-byte Folded Spill
movq S(%rip), %rdi
movw $0, (%rdi)
cmpq $3, %rbx
jb .LBB5_2
# %bb.1: # %.lr.ph.preheader
addq $2, %rdi
leaq -2(%rbx), %rdx
movl $1, %esi
callq memset@PLT
.LBB5_2: # %.preheader
movq %rbx, 48(%rsp)
xorl %eax, %eax
testq %rbx, %rbx
sets %al
fildll 48(%rsp)
fadds .LCPI5_0(,%rax,4)
fld %st(0)
fstpt 76(%rsp) # 10-byte Folded Spill
fstpt (%rsp)
callq sqrtl
flds .LCPI5_1(%rip)
xorl %eax, %eax
fxch %st(1)
fucomi %st(1), %st
setae %al
fldz
fcmovnb %st(2), %st
fstp %st(2)
fsubp %st, %st(1)
fnstcw 24(%rsp)
movzwl 24(%rsp), %ecx
orl $3072, %ecx # imm = 0xC00
movw %cx, 30(%rsp)
fldcw 30(%rsp)
fistpll 56(%rsp)
fldcw 24(%rsp)
shlq $63, %rax
xorq 56(%rsp), %rax
cmpq $3, %rax
jb .LBB5_9
# %bb.3: # %.lr.ph32.preheader
movl $2, %r14d
jmp .LBB5_4
.p2align 4, 0x90
.LBB5_8: # %.loopexit
# in Loop: Header=BB5_4 Depth=1
incq %r14
fldt 76(%rsp) # 10-byte Folded Reload
fstpt (%rsp)
callq sqrtl
flds .LCPI5_1(%rip)
xorl %eax, %eax
fxch %st(1)
fucomi %st(1), %st
setae %al
fldz
fcmovnb %st(2), %st
fstp %st(2)
fsubp %st, %st(1)
fnstcw 20(%rsp)
movzwl 20(%rsp), %ecx
orl $3072, %ecx # imm = 0xC00
movw %cx, 26(%rsp)
fldcw 26(%rsp)
fistpll 40(%rsp)
fldcw 20(%rsp)
shlq $63, %rax
xorq 40(%rsp), %rax
cmpq %rax, %r14
jae .LBB5_9
.LBB5_4: # %.lr.ph32
# =>This Loop Header: Depth=1
# Child Loop BB5_6 Depth 2
movq S(%rip), %rax
cmpb $0, (%rax,%r14)
je .LBB5_8
# %bb.5: # in Loop: Header=BB5_4 Depth=1
movl %r14d, %ecx
imull %r14d, %ecx
movslq %ecx, %rcx
cmpq %rbx, %rcx
jae .LBB5_8
.p2align 4, 0x90
.LBB5_6: # %.lr.ph29
# Parent Loop BB5_4 Depth=1
# => This Inner Loop Header: Depth=2
movb $0, (%rax,%rcx)
addl %r14d, %ecx
movslq %ecx, %rcx
cmpq %rbx, %rcx
jb .LBB5_6
jmp .LBB5_8
.LBB5_9: # %._crit_edge
flds .LCPI5_1(%rip)
xorl %ecx, %ecx
fldt 64(%rsp) # 10-byte Folded Reload
fucomi %st(1), %st
setae %cl
fldz
fcmovnb %st(2), %st
fstp %st(2)
fsubp %st, %st(1)
fnstcw 22(%rsp)
movzwl 22(%rsp), %eax
orl $3072, %eax # imm = 0xC00
movw %ax, 28(%rsp)
fldcw 28(%rsp)
fistpll 32(%rsp)
fldcw 22(%rsp)
shlq $63, %rcx
xorq 32(%rsp), %rcx
cmpq $3, %rcx
jb .LBB5_10
# %bb.12: # %.lr.ph36
movq S(%rip), %rdx
xorl %eax, %eax
.p2align 4, 0x90
.LBB5_13: # =>This Inner Loop Header: Depth=1
cmpb $0, (%rdx,%rcx)
cmovneq %rcx, %rax
decq %rcx
cmpq $2, %rcx
ja .LBB5_13
jmp .LBB5_11
.LBB5_10:
xorl %eax, %eax
.LBB5_11: # %._crit_edge37
addq $88, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size _Z17EratosthenesSieveey, .Lfunc_end5-_Z17EratosthenesSieveey
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function _Z13parallelSieveyyyRK7Wheel_ty
.LCPI6_0:
.long 1127219200 # 0x43300000
.long 1160773632 # 0x45300000
.long 0 # 0x0
.long 0 # 0x0
.LCPI6_1:
.quad 0x4330000000000000 # double 4503599627370496
.quad 0x4530000000000000 # double 1.9342813113834067E+25
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0
.LCPI6_2:
.long 0x3b800000 # float 0.00390625
.text
.globl _Z13parallelSieveyyyRK7Wheel_ty
.p2align 4, 0x90
.type _Z13parallelSieveyyyRK7Wheel_ty,@function
_Z13parallelSieveyyyRK7Wheel_ty: # @_Z13parallelSieveyyyRK7Wheel_ty
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $248, %rsp
.cfi_def_cfa_offset 304
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r8, %r14
movq %rcx, %r13
movq %rdx, %r15
movq %rsi, %r12
movq %rdi, %rbx
leaq 64(%rsp), %rdi
callq hipEventCreate
leaq 56(%rsp), %rdi
callq hipEventCreate
movq $0, 24(%rsp)
xorpd %xmm0, %xmm0
movapd %xmm0, 32(%rsp)
xorl %edi, %edi
callq hipSetDevice
cmpb $0, check_cuda_status(%rip)
je .LBB6_3
# %bb.1:
testl %eax, %eax
jne .LBB6_2
.LBB6_3:
movq 64(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
leaq 24(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
cmpb $0, check_cuda_status(%rip)
je .LBB6_6
# %bb.4:
testl %eax, %eax
jne .LBB6_5
.LBB6_6:
leaq 32(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
cmpb $0, check_cuda_status(%rip)
je .LBB6_9
# %bb.7:
testl %eax, %eax
jne .LBB6_8
.LBB6_9:
leaq 40(%rsp), %rdi
leaq (,%rbx,8), %rbp
movq %rbp, %rsi
callq hipMalloc
cmpb $0, check_cuda_status(%rip)
je .LBB6_12
# %bb.10:
testl %eax, %eax
jne .LBB6_11
.LBB6_12:
movq 24(%rsp), %rdi
movq S(%rip), %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
cmpb $0, check_cuda_status(%rip)
je .LBB6_15
# %bb.13:
testl %eax, %eax
jne .LBB6_14
.LBB6_15:
movq 32(%rsp), %rdi
movq (%r13), %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
cmpb $0, check_cuda_status(%rip)
je .LBB6_18
# %bb.16:
testl %eax, %eax
jne .LBB6_17
.LBB6_18:
movq 40(%rsp), %rdi
movq 8(%r13), %rsi
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
cmpb $0, check_cuda_status(%rip)
je .LBB6_21
# %bb.19:
testl %eax, %eax
jne .LBB6_20
.LBB6_21:
movabsq $4294967296, %rbp # imm = 0x100000000
movq %rbx, %xmm1
punpckldq .LCPI6_0(%rip), %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
subpd .LCPI6_1(%rip), %xmm1
movapd %xmm1, %xmm0
unpckhpd %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
addsd %xmm1, %xmm0
xorpd %xmm1, %xmm1
ucomisd %xmm1, %xmm0
jb .LBB6_23
# %bb.22:
sqrtsd %xmm0, %xmm0
jmp .LBB6_24
.LBB6_23: # %call.sqrt
callq sqrt
.LBB6_24: # %.split
callq ceil@PLT
movsd %xmm0, 72(%rsp)
fldl 72(%rsp)
fmuls .LCPI6_2(%rip)
fstpt (%rsp)
callq ceill@PLT
fnstcw 52(%rsp)
movzwl 52(%rsp), %eax
orl $3072, %eax # imm = 0xC00
movw %ax, 54(%rsp)
fldcw 54(%rsp)
fistpll 80(%rsp)
fldcw 52(%rsp)
movl 80(%rsp), %edi
orq %rbp, %rdi
addq $256, %rbp # imm = 0x100
movl $1, %esi
movq %rbp, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_26
# %bb.25:
movupd (%r13), %xmm0
movq 24(%rsp), %rax
movupd %xmm0, 232(%rsp)
movq %rbx, 168(%rsp)
movq %r12, 160(%rsp)
movq %r15, 152(%rsp)
movq %r14, 144(%rsp)
movq %rax, 136(%rsp)
leaq 168(%rsp), %rax
movq %rax, 176(%rsp)
leaq 160(%rsp), %rax
movq %rax, 184(%rsp)
leaq 152(%rsp), %rax
movq %rax, 192(%rsp)
leaq 232(%rsp), %rax
movq %rax, 200(%rsp)
leaq 144(%rsp), %rax
movq %rax, 208(%rsp)
leaq 136(%rsp), %rax
movq %rax, 216(%rsp)
leaq 120(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
movq 96(%rsp), %rax
movq 88(%rsp), %rdi
movq 120(%rsp), %rsi
movl 128(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
movq %rdi, 8(%rsp)
movq %rax, (%rsp)
leaq 176(%rsp), %r9
movl $_Z19parallelSieveKernelyyy7Wheel_tyPb, %edi
callq hipLaunchKernel
.LBB6_26:
callq hipGetLastError
cmpb $0, check_cuda_status(%rip)
je .LBB6_29
# %bb.27:
testl %eax, %eax
jne .LBB6_28
.LBB6_29:
callq hipDeviceSynchronize
cmpb $0, check_cuda_status(%rip)
je .LBB6_32
# %bb.30:
testl %eax, %eax
jne .LBB6_31
.LBB6_32:
movq S(%rip), %rdi
movq 24(%rsp), %rsi
movq %rbx, %rdx
movl $2, %ecx
callq hipMemcpy
movl %eax, %ebx
cmpb $0, check_cuda_status(%rip)
je .LBB6_37
# %bb.33:
testl %ebx, %ebx
jne .LBB6_34
.LBB6_37:
movq 56(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 56(%rsp), %rdi
callq hipEventSynchronize
movq 64(%rsp), %rsi
movq 56(%rsp), %rdx
leaq 176(%rsp), %rdi
callq hipEventElapsedTime
movss 176(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.15, %edi
movb $1, %al
callq printf
jmp .LBB6_38
.LBB6_2:
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.5, %edi
movl $64, %esi
movl $1, %edx
callq fwrite@PLT
jmp .LBB6_39
.LBB6_5:
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.6, %edi
movl $36, %esi
jmp .LBB6_36
.LBB6_8:
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.7, %edi
movl $30, %esi
jmp .LBB6_36
.LBB6_11:
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.8, %edi
movl $32, %esi
jmp .LBB6_36
.LBB6_14:
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.9, %edi
jmp .LBB6_35
.LBB6_17:
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.10, %edi
movl $39, %esi
jmp .LBB6_36
.LBB6_20:
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.11, %edi
movl $43, %esi
jmp .LBB6_36
.LBB6_28:
movq stderr(%rip), %r14
movl %eax, %ebx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.12, %esi
movq %r14, %rdi
movq %rax, %rdx
xorl %eax, %eax
callq fprintf
jmp .LBB6_38
.LBB6_31:
movq stderr(%rip), %rdi
movl $.L.str.13, %esi
movl %eax, %ebx
movl %eax, %edx
xorl %eax, %eax
callq fprintf
jmp .LBB6_38
.LBB6_34:
movq stderr(%rip), %rcx
movl $.L.str.14, %edi
.LBB6_35:
movl $26, %esi
.LBB6_36:
movl $1, %edx
callq fwrite@PLT
.LBB6_38:
movq 24(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
.LBB6_39:
movl %ebx, %eax
addq $248, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end6:
.size _Z13parallelSieveyyyRK7Wheel_ty, .Lfunc_end6-_Z13parallelSieveyyyRK7Wheel_ty
.cfi_endproc
# -- End function
.globl _Z7cleanupPbR7Wheel_t10hipError_t # -- Begin function _Z7cleanupPbR7Wheel_t10hipError_t
.p2align 4, 0x90
.type _Z7cleanupPbR7Wheel_t10hipError_t,@function
_Z7cleanupPbR7Wheel_t10hipError_t: # @_Z7cleanupPbR7Wheel_t10hipError_t
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl %edx, %ebx
movq %rsi, %r14
callq hipFree
movq (%r14), %rdi
callq hipFree
movq 8(%r14), %rdi
callq hipFree
movl %ebx, %eax
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end7:
.size _Z7cleanupPbR7Wheel_t10hipError_t, .Lfunc_end7-_Z7cleanupPbR7Wheel_t10hipError_t
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB8_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB8_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19parallelSieveKernelyyy7Wheel_tyPb, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end8:
.size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB9_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB9_2:
retq
.Lfunc_end9:
.size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor
.cfi_endproc
# -- End function
.type S,@object # @S
.bss
.globl S
.p2align 3, 0x0
S:
.quad 0
.size S, 8
.type P,@object # @P
.globl P
.p2align 2, 0x0
P:
.long 0 # 0x0
.size P, 4
.type check_cuda_status,@object # @check_cuda_status
.globl check_cuda_status
check_cuda_status:
.byte 0 # 0x0
.size check_cuda_status, 1
.type myTimezone,@object # @myTimezone
.globl myTimezone
.p2align 2, 0x0
myTimezone:
.zero 8
.size myTimezone, 8
.type startTime,@object # @startTime
.globl startTime
.p2align 3, 0x0
startTime:
.zero 16
.size startTime, 16
.type endTime,@object # @endTime
.globl endTime
.p2align 3, 0x0
endTime:
.zero 16
.size endTime, 16
.type _Z19parallelSieveKernelyyy7Wheel_tyPb,@object # @_Z19parallelSieveKernelyyy7Wheel_tyPb
.section .rodata,"a",@progbits
.globl _Z19parallelSieveKernelyyy7Wheel_tyPb
.p2align 3, 0x0
_Z19parallelSieveKernelyyy7Wheel_tyPb:
.quad _Z34__device_stub__parallelSieveKernelyyy7Wheel_tyPb
.size _Z19parallelSieveKernelyyy7Wheel_tyPb, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Find primes up to: %llu\n\n"
.size .L.str, 26
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Algorithm 4.1"
.size .L.str.1, 14
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Algorithm 4.1 failed to execute!"
.size .L.str.2, 33
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "%llu "
.size .L.str.3, 6
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "parallelSieve() failed!"
.size .L.str.4, 24
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n"
.size .L.str.5, 65
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "hipMalloc failed on number field S!\n"
.size .L.str.6, 37
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "hipMalloc failed on wheel.rp!\n"
.size .L.str.7, 31
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "hipMalloc failed on wheel.dist!\n"
.size .L.str.8, 33
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "hipMemcpy failed! S->d_S.\n"
.size .L.str.9, 27
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "hipMemcpy failed! wheel.rp->d_wheel.rp\n"
.size .L.str.10, 40
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "hipMemcpy failed! wheel.dist->d_wheel.dist\n"
.size .L.str.11, 44
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "parallelSieveKernel launch failed: %s\n"
.size .L.str.12, 39
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz "hipDeviceSynchronize returned error code %d after launching addKernel!\n"
.size .L.str.13, 72
.type .L.str.14,@object # @.str.14
.L.str.14:
.asciz "hipMemcpy failed! d_S->S.\n"
.size .L.str.14, 27
.type .L.str.15,@object # @.str.15
.L.str.15:
.asciz "Time to generate: %0.5f ms\n"
.size .L.str.15, 28
.type .L.str.16,@object # @.str.16
.L.str.16:
.asciz "Running time for %s: %ld.%06ld sec\n\n"
.size .L.str.16, 37
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z19parallelSieveKernelyyy7Wheel_tyPb"
.size .L__unnamed_1, 38
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z34__device_stub__parallelSieveKernelyyy7Wheel_tyPb
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym myTimezone
.addrsig_sym startTime
.addrsig_sym endTime
.addrsig_sym _Z19parallelSieveKernelyyy7Wheel_tyPb
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
//device code
__global__ void VecAdd(float* a, float* b, float* c, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
c[i] = a[i] + b[i];
}
}
//host code
int main()
{
int N = 1024;
size_t size = N*sizeof(float);
//allocate input vectors in host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
float* h_C = (float*)malloc(size);
//initialize input vectors
for (int i = 0; i < N; i++)
{
h_A[i] = 0;
h_B[i] = 0;
}
//allocate vectors in device memory
float* d_A;
cudaMalloc(&d_A, size);
float* d_B;
cudaMalloc(&d_B, size);
float* d_C;
cudaMalloc(&d_C, size);
//copy from host to device
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
//Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid = (N+threadsPerBlock - 1)/threadsPerBlock;
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
//Copy result from device to host. h_C is the result.
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
//free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
//free host memory
free(h_A);
free(h_B);
free(h_C);
}
|
code for sm_80
Function : _Z6VecAddPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00d0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
//device code
__global__ void VecAdd(float* a, float* b, float* c, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
c[i] = a[i] + b[i];
}
}
//host code
int main()
{
int N = 1024;
size_t size = N*sizeof(float);
//allocate input vectors in host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
float* h_C = (float*)malloc(size);
//initialize input vectors
for (int i = 0; i < N; i++)
{
h_A[i] = 0;
h_B[i] = 0;
}
//allocate vectors in device memory
float* d_A;
cudaMalloc(&d_A, size);
float* d_B;
cudaMalloc(&d_B, size);
float* d_C;
cudaMalloc(&d_C, size);
//copy from host to device
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
//Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid = (N+threadsPerBlock - 1)/threadsPerBlock;
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
//Copy result from device to host. h_C is the result.
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
//free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
//free host memory
free(h_A);
free(h_B);
free(h_C);
}
|
.file "tmpxft_000064ab_00000000-6_mem.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z6VecAddPfS_S_iPfS_S_i
.type _Z30__device_stub__Z6VecAddPfS_S_iPfS_S_i, @function
_Z30__device_stub__Z6VecAddPfS_S_iPfS_S_i:
.LFB2052:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6VecAddPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z30__device_stub__Z6VecAddPfS_S_iPfS_S_i, .-_Z30__device_stub__Z6VecAddPfS_S_iPfS_S_i
.globl _Z6VecAddPfS_S_i
.type _Z6VecAddPfS_S_i, @function
_Z6VecAddPfS_S_i:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z6VecAddPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z6VecAddPfS_S_i, .-_Z6VecAddPfS_S_i
.globl main
.type main, @function
main:
.LFB2027:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $64, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $4096, %edi
call malloc@PLT
movq %rax, %rbp
movl $4096, %edi
call malloc@PLT
movq %rax, %rbx
movl $4096, %edi
call malloc@PLT
movq %rax, %r12
movl $0, %eax
.L12:
movl $0x00000000, 0(%rbp,%rax)
movl $0x00000000, (%rbx,%rax)
addq $4, %rax
cmpq $4096, %rax
jne .L12
leaq 8(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $4096, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4096, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 44(%rsp)
movl $1, 48(%rsp)
movl $4, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movl $2, %ecx
movl $4096, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L18
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
movl $1024, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z30__device_stub__Z6VecAddPfS_S_iPfS_S_i
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2027:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z6VecAddPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z6VecAddPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
//device code
__global__ void VecAdd(float* a, float* b, float* c, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
c[i] = a[i] + b[i];
}
}
//host code
int main()
{
int N = 1024;
size_t size = N*sizeof(float);
//allocate input vectors in host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
float* h_C = (float*)malloc(size);
//initialize input vectors
for (int i = 0; i < N; i++)
{
h_A[i] = 0;
h_B[i] = 0;
}
//allocate vectors in device memory
float* d_A;
cudaMalloc(&d_A, size);
float* d_B;
cudaMalloc(&d_B, size);
float* d_C;
cudaMalloc(&d_C, size);
//copy from host to device
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
//Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid = (N+threadsPerBlock - 1)/threadsPerBlock;
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
//Copy result from device to host. h_C is the result.
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
//free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
//free host memory
free(h_A);
free(h_B);
free(h_C);
}
|
#include <hip/hip_runtime.h>
//device code
__global__ void VecAdd(float* a, float* b, float* c, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
c[i] = a[i] + b[i];
}
}
//host code
int main()
{
int N = 1024;
size_t size = N*sizeof(float);
//allocate input vectors in host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
float* h_C = (float*)malloc(size);
//initialize input vectors
for (int i = 0; i < N; i++)
{
h_A[i] = 0;
h_B[i] = 0;
}
//allocate vectors in device memory
float* d_A;
hipMalloc(&d_A, size);
float* d_B;
hipMalloc(&d_B, size);
float* d_C;
hipMalloc(&d_C, size);
//copy from host to device
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
//Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid = (N+threadsPerBlock - 1)/threadsPerBlock;
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
//Copy result from device to host. h_C is the result.
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
//free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
//free host memory
free(h_A);
free(h_B);
free(h_C);
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
//device code
__global__ void VecAdd(float* a, float* b, float* c, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
c[i] = a[i] + b[i];
}
}
//host code
int main()
{
int N = 1024;
size_t size = N*sizeof(float);
//allocate input vectors in host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
float* h_C = (float*)malloc(size);
//initialize input vectors
for (int i = 0; i < N; i++)
{
h_A[i] = 0;
h_B[i] = 0;
}
//allocate vectors in device memory
float* d_A;
hipMalloc(&d_A, size);
float* d_B;
hipMalloc(&d_B, size);
float* d_C;
hipMalloc(&d_C, size);
//copy from host to device
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
//Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid = (N+threadsPerBlock - 1)/threadsPerBlock;
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
//Copy result from device to host. h_C is the result.
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
//free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
//free host memory
free(h_A);
free(h_B);
free(h_C);
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6VecAddPfS_S_i
.globl _Z6VecAddPfS_S_i
.p2align 8
.type _Z6VecAddPfS_S_i,@function
_Z6VecAddPfS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6VecAddPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6VecAddPfS_S_i, .Lfunc_end0-_Z6VecAddPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6VecAddPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6VecAddPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
//device code
__global__ void VecAdd(float* a, float* b, float* c, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
{
c[i] = a[i] + b[i];
}
}
//host code
int main()
{
int N = 1024;
size_t size = N*sizeof(float);
//allocate input vectors in host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
float* h_C = (float*)malloc(size);
//initialize input vectors
for (int i = 0; i < N; i++)
{
h_A[i] = 0;
h_B[i] = 0;
}
//allocate vectors in device memory
float* d_A;
hipMalloc(&d_A, size);
float* d_B;
hipMalloc(&d_B, size);
float* d_C;
hipMalloc(&d_C, size);
//copy from host to device
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
//Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid = (N+threadsPerBlock - 1)/threadsPerBlock;
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
//Copy result from device to host. h_C is the result.
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
//free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
//free host memory
free(h_A);
free(h_B);
free(h_C);
}
|
.text
.file "mem.hip"
.globl _Z21__device_stub__VecAddPfS_S_i # -- Begin function _Z21__device_stub__VecAddPfS_S_i
.p2align 4, 0x90
.type _Z21__device_stub__VecAddPfS_S_i,@function
_Z21__device_stub__VecAddPfS_S_i: # @_Z21__device_stub__VecAddPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6VecAddPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z21__device_stub__VecAddPfS_S_i, .Lfunc_end0-_Z21__device_stub__VecAddPfS_S_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $144, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %rbx
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %r14
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %r15
movl $4096, %edx # imm = 0x1000
movq %rbx, %rdi
xorl %esi, %esi
callq memset@PLT
movl $4096, %edx # imm = 0x1000
movq %r14, %rdi
xorl %esi, %esi
callq memset@PLT
leaq 24(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
leaq 16(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
leaq 8(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
movq 24(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967300, %rdi # imm = 0x100000004
leaq 252(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $1024, 36(%rsp) # imm = 0x400
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z6VecAddPfS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 8(%rsp), %rsi
movl $4096, %edx # imm = 0x1000
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
xorl %eax, %eax
addq $144, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6VecAddPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6VecAddPfS_S_i,@object # @_Z6VecAddPfS_S_i
.section .rodata,"a",@progbits
.globl _Z6VecAddPfS_S_i
.p2align 3, 0x0
_Z6VecAddPfS_S_i:
.quad _Z21__device_stub__VecAddPfS_S_i
.size _Z6VecAddPfS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z6VecAddPfS_S_i"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__VecAddPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6VecAddPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
|
code for sm_80
Function : _Z6VecAddPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00d0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6VecAddPfS_S_i
.globl _Z6VecAddPfS_S_i
.p2align 8
.type _Z6VecAddPfS_S_i,@function
_Z6VecAddPfS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6VecAddPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6VecAddPfS_S_i, .Lfunc_end0-_Z6VecAddPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6VecAddPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6VecAddPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
|
.file "tmpxft_000064ab_00000000-6_mem.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2030:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2030:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z6VecAddPfS_S_iPfS_S_i
.type _Z30__device_stub__Z6VecAddPfS_S_iPfS_S_i, @function
_Z30__device_stub__Z6VecAddPfS_S_iPfS_S_i:
.LFB2052:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6VecAddPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2052:
.size _Z30__device_stub__Z6VecAddPfS_S_iPfS_S_i, .-_Z30__device_stub__Z6VecAddPfS_S_iPfS_S_i
.globl _Z6VecAddPfS_S_i
.type _Z6VecAddPfS_S_i, @function
_Z6VecAddPfS_S_i:
.LFB2053:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z6VecAddPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2053:
.size _Z6VecAddPfS_S_i, .-_Z6VecAddPfS_S_i
.globl main
.type main, @function
main:
.LFB2027:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $64, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $4096, %edi
call malloc@PLT
movq %rax, %rbp
movl $4096, %edi
call malloc@PLT
movq %rax, %rbx
movl $4096, %edi
call malloc@PLT
movq %rax, %r12
movl $0, %eax
.L12:
movl $0x00000000, 0(%rbp,%rax)
movl $0x00000000, (%rbx,%rax)
addq $4, %rax
cmpq $4096, %rax
jne .L12
leaq 8(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $4096, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4096, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $256, 44(%rsp)
movl $1, 48(%rsp)
movl $4, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movl $2, %ecx
movl $4096, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L18
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
movl $1024, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z30__device_stub__Z6VecAddPfS_S_iPfS_S_i
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2027:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z6VecAddPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2055:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z6VecAddPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2055:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
.text
.file "mem.hip"
.globl _Z21__device_stub__VecAddPfS_S_i # -- Begin function _Z21__device_stub__VecAddPfS_S_i
.p2align 4, 0x90
.type _Z21__device_stub__VecAddPfS_S_i,@function
_Z21__device_stub__VecAddPfS_S_i: # @_Z21__device_stub__VecAddPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6VecAddPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z21__device_stub__VecAddPfS_S_i, .Lfunc_end0-_Z21__device_stub__VecAddPfS_S_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $144, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %rbx
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %r14
movl $4096, %edi # imm = 0x1000
callq malloc
movq %rax, %r15
movl $4096, %edx # imm = 0x1000
movq %rbx, %rdi
xorl %esi, %esi
callq memset@PLT
movl $4096, %edx # imm = 0x1000
movq %r14, %rdi
xorl %esi, %esi
callq memset@PLT
leaq 24(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
leaq 16(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
leaq 8(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
movq 24(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967300, %rdi # imm = 0x100000004
leaq 252(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $1024, 36(%rsp) # imm = 0x400
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z6VecAddPfS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq 8(%rsp), %rsi
movl $4096, %edx # imm = 0x1000
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
xorl %eax, %eax
addq $144, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6VecAddPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6VecAddPfS_S_i,@object # @_Z6VecAddPfS_S_i
.section .rodata,"a",@progbits
.globl _Z6VecAddPfS_S_i
.p2align 3, 0x0
_Z6VecAddPfS_S_i:
.quad _Z21__device_stub__VecAddPfS_S_i
.size _Z6VecAddPfS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z6VecAddPfS_S_i"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__VecAddPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6VecAddPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void vecAdd(double * a, double * b, double * c, int n) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n)
c[id] = a[id] + b[id];
}
int main(int argc, char * argv[]) {
int n = 100, i;
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
size_t bytes = n * sizeof(double);
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
for (i = 0; i < n; i++)
h_a[i] = h_b[i] = i;
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
blockSize = 10;
gridSize = (int) ceil((float) n / blockSize);
vecAdd <<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
for (i = 0; i < n; i++)
printf(" %f + %f =%f\n", h_a[i], h_b[i], h_c[i]);
cudaFree(d_a);cudaFree(d_b);cudaFree(d_c);
free(h_a);free(h_b);free(h_c);
return 0;
}
|
code for sm_80
Function : _Z6vecAddPdS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R8, SR_CTAID.X ; /* 0x0000000000087919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R8, R8, c[0x0][0x0], R3 ; /* 0x0000000008087a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R8, c[0x0][0x178], PT ; /* 0x00005e0008007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R9, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff097435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R8, R9, c[0x0][0x168] ; /* 0x00005a0008047625 */
/* 0x000fc800078e0209 */
/*0090*/ IMAD.WIDE R2, R8.reuse, R9.reuse, c[0x0][0x160] ; /* 0x0000580008027625 */
/* 0x0c0fe400078e0209 */
/*00a0*/ LDG.E.64 R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1b00 */
/*00b0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1b00 */
/*00c0*/ IMAD.WIDE R8, R8, R9, c[0x0][0x170] ; /* 0x00005c0008087625 */
/* 0x000fe200078e0209 */
/*00d0*/ DADD R6, R4, R2 ; /* 0x0000000004067229 */
/* 0x004e0e0000000002 */
/*00e0*/ STG.E.64 [R8.64], R6 ; /* 0x0000000608007986 */
/* 0x001fe2000c101b04 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void vecAdd(double * a, double * b, double * c, int n) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n)
c[id] = a[id] + b[id];
}
int main(int argc, char * argv[]) {
int n = 100, i;
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
size_t bytes = n * sizeof(double);
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
for (i = 0; i < n; i++)
h_a[i] = h_b[i] = i;
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
blockSize = 10;
gridSize = (int) ceil((float) n / blockSize);
vecAdd <<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
for (i = 0; i < n; i++)
printf(" %f + %f =%f\n", h_a[i], h_b[i], h_c[i]);
cudaFree(d_a);cudaFree(d_b);cudaFree(d_c);
free(h_a);free(h_b);free(h_c);
return 0;
}
|
.file "tmpxft_0010506f_00000000-6_1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z30__device_stub__Z6vecAddPdS_S_iPdS_S_i
.type _Z30__device_stub__Z6vecAddPdS_S_iPdS_S_i, @function
_Z30__device_stub__Z6vecAddPdS_S_iPdS_S_i:
.LFB2082:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6vecAddPdS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z30__device_stub__Z6vecAddPdS_S_iPdS_S_i, .-_Z30__device_stub__Z6vecAddPdS_S_iPdS_S_i
.globl _Z6vecAddPdS_S_i
.type _Z6vecAddPdS_S_i, @function
_Z6vecAddPdS_S_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z30__device_stub__Z6vecAddPdS_S_iPdS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6vecAddPdS_S_i, .-_Z6vecAddPdS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string " %f + %f =%f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $64, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $800, %edi
call malloc@PLT
movq %rax, %r12
movl $800, %edi
call malloc@PLT
movq %rax, %rbp
movl $800, %edi
call malloc@PLT
movq %rax, %r13
leaq 8(%rsp), %rdi
movl $800, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $800, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $800, %esi
call cudaMalloc@PLT
movl $0, %eax
.L12:
pxor %xmm0, %xmm0
cvtsi2sdl %eax, %xmm0
movsd %xmm0, 0(%rbp,%rax,8)
movsd %xmm0, (%r12,%rax,8)
addq $1, %rax
cmpq $100, %rax
jne .L12
movl $1, %ecx
movl $800, %edx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $800, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $10, 44(%rsp)
movl $1, 48(%rsp)
movl $10, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L19
.L13:
movl $2, %ecx
movl $800, %edx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movl $0, %ebx
leaq .LC0(%rip), %r14
.L14:
movsd (%r12,%rbx), %xmm0
movsd 0(%r13,%rbx), %xmm2
movsd 0(%rbp,%rbx), %xmm1
movq %r14, %rsi
movl $2, %edi
movl $3, %eax
call __printf_chk@PLT
addq $8, %rbx
cmpq $800, %rbx
jne .L14
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L20
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
movl $100, %ecx
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z30__device_stub__Z6vecAddPdS_S_iPdS_S_i
jmp .L13
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC1:
.string "_Z6vecAddPdS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z6vecAddPdS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
|
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void vecAdd(double * a, double * b, double * c, int n) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n)
c[id] = a[id] + b[id];
}
int main(int argc, char * argv[]) {
int n = 100, i;
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
size_t bytes = n * sizeof(double);
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
for (i = 0; i < n; i++)
h_a[i] = h_b[i] = i;
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
blockSize = 10;
gridSize = (int) ceil((float) n / blockSize);
vecAdd <<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
for (i = 0; i < n; i++)
printf(" %f + %f =%f\n", h_a[i], h_b[i], h_c[i]);
cudaFree(d_a);cudaFree(d_b);cudaFree(d_c);
free(h_a);free(h_b);free(h_c);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void vecAdd(double * a, double * b, double * c, int n) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n)
c[id] = a[id] + b[id];
}
int main(int argc, char * argv[]) {
int n = 100, i;
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
size_t bytes = n * sizeof(double);
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
for (i = 0; i < n; i++)
h_a[i] = h_b[i] = i;
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
blockSize = 10;
gridSize = (int) ceil((float) n / blockSize);
vecAdd <<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);
for (i = 0; i < n; i++)
printf(" %f + %f =%f\n", h_a[i], h_b[i], h_c[i]);
hipFree(d_a);hipFree(d_b);hipFree(d_c);
free(h_a);free(h_b);free(h_c);
return 0;
}
|
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void vecAdd(double * a, double * b, double * c, int n) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n)
c[id] = a[id] + b[id];
}
int main(int argc, char * argv[]) {
int n = 100, i;
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
size_t bytes = n * sizeof(double);
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
for (i = 0; i < n; i++)
h_a[i] = h_b[i] = i;
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
blockSize = 10;
gridSize = (int) ceil((float) n / blockSize);
vecAdd <<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);
for (i = 0; i < n; i++)
printf(" %f + %f =%f\n", h_a[i], h_b[i], h_c[i]);
hipFree(d_a);hipFree(d_b);hipFree(d_c);
free(h_a);free(h_b);free(h_c);
return 0;
}
|
.text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6vecAddPdS_S_i
.globl _Z6vecAddPdS_S_i
.p2align 8
.type _Z6vecAddPdS_S_i,@function
_Z6vecAddPdS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 3, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b64 v[2:3], v[2:3], off
global_load_b64 v[4:5], v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f64 v[2:3], v[2:3], v[4:5]
global_store_b64 v[0:1], v[2:3], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6vecAddPdS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6vecAddPdS_S_i, .Lfunc_end0-_Z6vecAddPdS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6vecAddPdS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6vecAddPdS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata
|
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void vecAdd(double * a, double * b, double * c, int n) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n)
c[id] = a[id] + b[id];
}
int main(int argc, char * argv[]) {
int n = 100, i;
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
size_t bytes = n * sizeof(double);
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
for (i = 0; i < n; i++)
h_a[i] = h_b[i] = i;
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
blockSize = 10;
gridSize = (int) ceil((float) n / blockSize);
vecAdd <<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);
for (i = 0; i < n; i++)
printf(" %f + %f =%f\n", h_a[i], h_b[i], h_c[i]);
hipFree(d_a);hipFree(d_b);hipFree(d_c);
free(h_a);free(h_b);free(h_c);
return 0;
}
|
.text
.file "1.hip"
.globl _Z21__device_stub__vecAddPdS_S_i # -- Begin function _Z21__device_stub__vecAddPdS_S_i
.p2align 4, 0x90
.type _Z21__device_stub__vecAddPdS_S_i,@function
_Z21__device_stub__vecAddPdS_S_i: # @_Z21__device_stub__vecAddPdS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6vecAddPdS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z21__device_stub__vecAddPdS_S_i, .Lfunc_end0-_Z21__device_stub__vecAddPdS_S_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $152, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $800, %edi # imm = 0x320
callq malloc
movq %rax, %rbx
movl $800, %edi # imm = 0x320
callq malloc
movq %rax, %r14
movl $800, %edi # imm = 0x320
callq malloc
movq %rax, %r15
leaq 24(%rsp), %rdi
movl $800, %esi # imm = 0x320
callq hipMalloc
leaq 16(%rsp), %rdi
movl $800, %esi # imm = 0x320
callq hipMalloc
leaq 8(%rsp), %rdi
movl $800, %esi # imm = 0x320
callq hipMalloc
xorl %eax, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2sd %eax, %xmm0
movsd %xmm0, (%r14,%rax,8)
movsd %xmm0, (%rbx,%rax,8)
incq %rax
cmpq $100, %rax
jne .LBB1_1
# %bb.2:
movq 24(%rsp), %rdi
movl $800, %edx # imm = 0x320
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 16(%rsp), %rdi
movl $800, %edx # imm = 0x320
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967306, %rdi # imm = 0x10000000A
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movl $100, 36(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 36(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z6vecAddPdS_S_i, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq 8(%rsp), %rsi
movl $800, %edx # imm = 0x320
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movsd (%rbx,%r12,8), %xmm0 # xmm0 = mem[0],zero
movsd (%r14,%r12,8), %xmm1 # xmm1 = mem[0],zero
movsd (%r15,%r12,8), %xmm2 # xmm2 = mem[0],zero
movl $.L.str, %edi
movb $3, %al
callq printf
incq %r12
cmpq $100, %r12
jne .LBB1_5
# %bb.6:
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6vecAddPdS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6vecAddPdS_S_i,@object # @_Z6vecAddPdS_S_i
.section .rodata,"a",@progbits
.globl _Z6vecAddPdS_S_i
.p2align 3, 0x0
_Z6vecAddPdS_S_i:
.quad _Z21__device_stub__vecAddPdS_S_i
.size _Z6vecAddPdS_S_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz " %f + %f =%f\n"
.size .L.str, 14
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6vecAddPdS_S_i"
.size .L__unnamed_1, 17
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__vecAddPdS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6vecAddPdS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.